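/*
 * This file implements the UBI character device interface: the per-volume
 * character devices (read, write, seek, volume update and atomic LEB change),
 * the per-device ioctls (volume create, remove, re-size and re-name) and the
 * UBI control device ioctls (MTD attach and detach).
 */
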
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"
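
/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes the volume open mode to "exclusive". Returns the
 * previous mode value (a positive integer) in case of success and a negative
 * error code in case of failure.
 */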
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive;
	ubi_assert(users > 0);
	if (users > 1) {
		dbg_err("%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}
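
/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */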
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	if (mode == UBI_READONLY)
		vol->readers = 1;
	else if (mode == UBI_READWRITE)
		vol->writers = 1;
	else
		vol->exclusive = 1;
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}

static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int vol_id = iminor(inode) - 1, mode, ubi_num;

	ubi_num = ubi_major2num(imajor(inode));
	if (ubi_num < 0)
		return ubi_num;

	if (file->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d",
		ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}

static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn("update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change"
			" for volume %d:%d, cancel", vol->upd_received,
			vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}

static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	loff_t new_offset;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		dbg_err("updating");
		return -EBUSY;
	}

	switch (origin) {
	case 0: /* SEEK_SET */
		new_offset = offset;
		break;
	case 1: /* SEEK_CUR */
		new_offset = file->f_pos + offset;
		break;
	case 2: /* SEEK_END */
		new_offset = vol->used_bytes + offset;
		break;
	default:
		return -EINVAL;
	}

	if (new_offset < 0 || new_offset > vol->used_bytes) {
		dbg_err("bad seek %lld", new_offset);
		return -EINVAL;
	}

	dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld",
		vol->vol_id, offset, origin, new_offset);

	file->f_pos = new_offset;
	return new_offset;
}

static int vol_cdev_fsync(struct file *file, int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;

	return ubi_sync(ubi->ubi_num);
}

static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		dbg_err("updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		dbg_err("damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);

	do {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	return err ? err : count_save - count;
}
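
/*
 * This function allows to directly write to dynamic UBI volumes, without
 * issuing the volume update operation. It is only permitted when the
 * "direct write" property has been enabled for the volume (see
 * UBI_PROP_DIRECT_WRITE in the volume ioctl handler below).
 */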
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		dbg_err("unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can only write in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		dbg_err("unaligned write length");
		return -EINVAL;
	}

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
					UBI_UNKNOWN);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	return err ? err : count_save - count;
}

static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err("cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains the number of
		 * actually written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn("volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}

static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		rsvd_bytes = (long long)vol->reserved_pebs *
			     (ubi->leb_size - vol->data_pad);
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		if (bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/* Validate the request */
		err = -EINVAL;
		if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;
		if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
		    req.dtype != UBI_UNKNOWN)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (lnum < 0 || lnum >= vol->reserved_pebs) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		err = ubi_wl_flush(ubi);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum, req.dtype);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* "Is logical eraseblock mapped?" command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETPROP:
	{
		struct ubi_set_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->device_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->device_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
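
/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL or
 * %-ENAMETOOLONG if not.
 */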
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	dbg_err("bad volume creation request");
	ubi_dbg_dump_mkvol_req(req);
	return err;
}
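
/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */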
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	if (req->bytes <= 0)
		return -EINVAL;

	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
		return -EINVAL;

	return 0;
}
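
/**
 * rename_volumes - rename UBI volumes.
 * @ubi: UBI device description object
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volumes and calls the corresponding volume management
 * function. Returns zero in case of success and a negative error code in
 * case of failure.
 */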
static int rename_volumes(struct ubi_device *ubi,
			  struct ubi_rnvol_req *req)
{
	int i, n, err;
	struct list_head rename_list;
	struct ubi_rename_entry *re, *re1;

	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
		return -EINVAL;

	if (req->count == 0)
		return 0;

	/* Validate volume IDs and names in the request */
	for (i = 0; i < req->count; i++) {
		if (req->ents[i].vol_id < 0 ||
		    req->ents[i].vol_id >= ubi->vtbl_slots)
			return -EINVAL;
		if (req->ents[i].name_len < 0)
			return -EINVAL;
		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
			return -ENAMETOOLONG;
		req->ents[i].name[req->ents[i].name_len] = '\0';
		n = strlen(req->ents[i].name);
		if (n != req->ents[i].name_len)
			return -EINVAL;
	}

	/* Make sure volume IDs and names are unique */
	for (i = 0; i < req->count - 1; i++) {
		for (n = i + 1; n < req->count; n++) {
			if (req->ents[i].vol_id == req->ents[n].vol_id) {
				dbg_err("duplicated volume id %d",
					req->ents[i].vol_id);
				return -EINVAL;
			}
			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
				dbg_err("duplicated volume name \"%s\"",
					req->ents[i].name);
				return -EINVAL;
			}
		}
	}

	/* Create the re-name list */
	INIT_LIST_HEAD(&rename_list);
	for (i = 0; i < req->count; i++) {
		int vol_id = req->ents[i].vol_id;
		int name_len = req->ents[i].name_len;
		const char *name = req->ents[i].name;

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			goto out_free;
		}

		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(re->desc)) {
			err = PTR_ERR(re->desc);
			dbg_err("cannot open volume %d, error %d", vol_id, err);
			kfree(re);
			goto out_free;
		}

		/* Skip this re-naming if the name does not really change */
		if (re->desc->vol->name_len == name_len &&
		    !memcmp(re->desc->vol->name, name, name_len)) {
			ubi_close_volume(re->desc);
			kfree(re);
			continue;
		}

		re->new_name_len = name_len;
		memcpy(re->new_name, name, name_len);
		list_add_tail(&re->list, &rename_list);
		dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
			vol_id, re->desc->vol->name, name);
	}

	if (list_empty(&rename_list))
		return 0;

	/* Find out the volumes which have to be removed */
	list_for_each_entry(re, &rename_list, list) {
		struct ubi_volume_desc *desc;
		int no_remove_needed = 0;

		/*
		 * Volume @re->desc->vol is going to be re-named to
		 * @re->new_name. If a volume with name @re->new_name currently
		 * exists, it has to be removed, unless it is also re-named in
		 * this request.
		 */
		list_for_each_entry(re1, &rename_list, list) {
			if (re->new_name_len == re1->desc->vol->name_len &&
			    !memcmp(re->new_name, re1->desc->vol->name,
				    re1->desc->vol->name_len)) {
				no_remove_needed = 1;
				break;
			}
		}

		if (no_remove_needed)
			continue;

		/*
		 * It seems we need to remove the volume with name
		 * @re->new_name, if it exists.
		 */
		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
					  UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			if (err == -ENODEV)
				/* Re-naming into a non-existing volume name */
				continue;

			/* The volume exists but is busy, or an error occurred */
			dbg_err("cannot open volume \"%s\", error %d",
				re->new_name, err);
			goto out_free;
		}

		re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re1) {
			err = -ENOMEM;
			ubi_close_volume(desc);
			goto out_free;
		}

		re1->remove = 1;
		re1->desc = desc;
		list_add(&re1->list, &rename_list);
		dbg_msg("will remove volume %d, name \"%s\"",
			re1->desc->vol->vol_id, re1->desc->vol->name);
	}

	mutex_lock(&ubi->device_mutex);
	err = ubi_rename_volumes(ubi, &rename_list);
	mutex_unlock(&ubi->device_mutex);

out_free:
	list_for_each_entry_safe(re, re1, &rename_list, list) {
		ubi_close_volume(re->desc);
		list_del(&re->list);
		kfree(re);
	}
	return err;
}

static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), but the
		 * descriptor opened above still holds a reference, so close
		 * it in any case.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		/* Round the requested size up to a whole number of LEBs */
		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_msg("re-name volumes");
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}

static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
		else
			/* @err contains the UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

#ifdef CONFIG_COMPAT
static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return vol_cdev_ioctl(file, cmd, translated_arg);
}

static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ubi_cdev_ioctl(file, cmd, translated_arg);
}

static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	unsigned long translated_arg = (unsigned long)compat_ptr(arg);

	return ctrl_cdev_ioctl(file, cmd, translated_arg);
}
#else
#define vol_cdev_compat_ioctl NULL
#define ubi_cdev_compat_ioctl NULL
#define ctrl_cdev_compat_ioctl NULL
#endif

/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync          = vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = vol_cdev_compat_ioctl,
};

/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = ubi_cdev_compat_ioctl,
};

/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = ctrl_cdev_compat_ioctl,
	.llseek         = noop_llseek,
};