/*
 * Ram backed block device driver.
 */
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/debugfs.h>

#include <linux/uaccess.h>

/*
 * Each brd device keeps a radix tree (brd_pages) of the pages that back the
 * block device's contents. A page's ->index is its offset into the device in
 * PAGE_SIZE units. This store is private to brd and is not connected to the
 * kernel's page cache.
 */
struct brd_device {
	int			brd_number;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages, and the lock protecting it. This is the
	 * contents of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
	u64			brd_nr_pages;
};

/*
 * Look up and return the brd page for a given sector, or NULL if no page has
 * been allocated for it yet.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * Page lifetime is protected by the fact that the device node is held
	 * open: brd pages are never freed while the device is in use, so no
	 * extra locking or refcounting is needed here. The rcu_read_lock()
	 * only guards the radix-tree nodes themselves against concurrent
	 * deletes.
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return the brd page for a given sector. If it does not exist,
 * allocate an empty page, insert it into the radix tree, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/* Lost a race with another inserter: use the existing page. */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else {
		brd->brd_nr_pages++;
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

/*
 * Free all backing store pages and the radix tree. Must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * Freeing a very large ramdisk can take several seconds, so
		 * reschedule between batches to avoid stalling the CPU.
		 */
		cond_resched();

		/*
		 * This assumes that radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes, so
		 * will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep: it
 * allocates the one or two backing pages the copy will touch.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		/* The copy straddles a page boundary: finish in the next page. */
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 * Sectors with no backing page read back as zeroes.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio: copy len bytes at off in page to or from
 * the brd, depending on the request direction.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(op)) {
		/* Allocate backing pages up front; this may sleep. */
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

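/*
 * Handle a bio by copying each segment to or from the backing pages,
 * allocating pages as needed for writes.
 */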
static blk_qc_t brd_submit_bio(struct bio *bio)
{
	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
	sector_t sector = bio->bi_iter.bi_sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		/* Don't support un-aligned buffers. */
		WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
				(len & (SECTOR_SIZE - 1)));

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

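/*
 * Synchronously read or write a single page, bypassing bio allocation;
 * completion is signalled through page_endio().
 */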
static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations brd_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= brd_submit_bio,
	.rw_page	= brd_rw_page,
};

/*
 * Module parameters and the rest of the kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot option (non-modular): ramdisk_size=<kbytes> */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * All allocated devices are kept on brd_devices, protected by
 * brd_devices_mutex.
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;

static int brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;
	char buf[DISK_NAME_LEN];

	mutex_lock(&brd_devices_mutex);
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i) {
			mutex_unlock(&brd_devices_mutex);
			return -EEXIST;
		}
	}
	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd) {
		mutex_unlock(&brd_devices_mutex);
		return -ENOMEM;
	}
	brd->brd_number = i;
	list_add_tail(&brd->brd_list, &brd_devices);
	mutex_unlock(&brd_devices_mutex);

	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	snprintf(buf, DISK_NAME_LEN, "ram%d", i);
	if (!IS_ERR_OR_NULL(brd_debugfs_dir))
		debugfs_create_u64(buf, 0444, brd_debugfs_dir,
				&brd->brd_nr_pages);

	disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!disk)
		goto out_free_dev;

	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->minors		= max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->flags		= GENHD_FL_EXT_DEVT;
	strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
	set_capacity(disk, rd_size * 2);	/* rd_size is in KiB, capacity in 512-byte sectors */

	/*
	 * Report a page-sized physical block size so partitioning tools
	 * align partitions to page boundaries.
	 */
	blk_queue_physical_block_size(disk->queue, PAGE_SIZE);

	/* A RAM disk is not rotational and adds no entropy. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	add_disk(disk);

	return 0;

out_free_dev:
	mutex_lock(&brd_devices_mutex);
	list_del(&brd->brd_list);
	mutex_unlock(&brd_devices_mutex);
	kfree(brd);
	return -ENOMEM;
}

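/*
 * Probe callback passed to __register_blkdev(): allocate the brd device
 * backing a minor the first time its device node is opened.
 */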
static void brd_probe(dev_t dev)
{
	brd_alloc(MINOR(dev) / max_part);
}

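/* Tear down one device: remove the gendisk and free its backing pages. */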
static void brd_del_one(struct brd_device *brd)
{
	del_gendisk(brd->brd_disk);
	blk_cleanup_disk(brd->brd_disk);
	brd_free_pages(brd);
	mutex_lock(&brd_devices_mutex);
	list_del(&brd->brd_list);
	mutex_unlock(&brd_devices_mutex);
	kfree(brd);
}

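/* Validate the max_part module parameter, rounding or clamping it to a sane value. */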
static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * Make sure (1U << MINORBITS) is an exact multiple of 'max_part',
	 * otherwise different partitions can end up with the same dev_t.
	 * If it is not, round max_part up to the next power of two.
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}

static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int err, i;

	/*
	 * brd devices are created in two ways:
	 *
	 * (1) rd_nr devices are allocated up front here at module load
	 *     (rd_nr defaults to CONFIG_BLK_DEV_RAM_COUNT).
	 * (2) Additional devices are instantiated on demand: opening a
	 *     device node with the RAM disk major invokes brd_probe(),
	 *     which allocates the backing device for that minor if it
	 *     does not exist yet.
	 */
	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
		return -EIO;

	brd_check_and_reset_par();

	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);

	for (i = 0; i < rd_nr; i++) {
		err = brd_alloc(i);
		if (err)
			goto out_free;
	}

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	pr_info("brd: module NOT loaded !!!\n");
	return err;
}

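/* Module unload: unregister the major and destroy all devices. */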
static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);