1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
/* Classify a lock by the fl_flags bit that marks its flavour. */
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)

/* Sysctl knobs: whether leases may be taken at all, and how many seconds
 * a lease holder is given to surrender a broken lease. */
int leases_enable = 1;
int lease_break_time = 45;

/* Walk every lock on an inode via the address of each fl_next link, so
 * the current entry can be unlinked while iterating. */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/* Global list of all active locks, global list of blocked waiters (used
 * for POSIX deadlock detection), and the spinlock guarding both plus the
 * per-inode i_flock chains. */
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);
145
146
147
148
149
/* Acquire the global spinlock protecting all file-lock state. */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
155
/* Release the global spinlock taken by lock_flocks(). */
void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
161
/* Slab cache from which all file_lock structures are allocated. */
static struct kmem_cache *filelock_cache __read_mostly;

/* Allocate a file_lock from the slab cache; may sleep (GFP_KERNEL). */
struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
170
171void locks_release_private(struct file_lock *fl)
172{
173 if (fl->fl_ops) {
174 if (fl->fl_ops->fl_release_private)
175 fl->fl_ops->fl_release_private(fl);
176 fl->fl_ops = NULL;
177 }
178 if (fl->fl_lmops) {
179 if (fl->fl_lmops->fl_release_private)
180 fl->fl_lmops->fl_release_private(fl);
181 fl->fl_lmops = NULL;
182 }
183
184}
185EXPORT_SYMBOL_GPL(locks_release_private);
186
187
/*
 * locks_free_lock - return a lock to the slab cache
 * @fl: lock to free; must already be unlinked from every list and have
 *      no tasks sleeping on its wait queue
 */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
198
199void locks_init_lock(struct file_lock *fl)
200{
201 INIT_LIST_HEAD(&fl->fl_link);
202 INIT_LIST_HEAD(&fl->fl_block);
203 init_waitqueue_head(&fl->fl_wait);
204 fl->fl_next = NULL;
205 fl->fl_fasync = NULL;
206 fl->fl_owner = NULL;
207 fl->fl_pid = 0;
208 fl->fl_nspid = NULL;
209 fl->fl_file = NULL;
210 fl->fl_flags = 0;
211 fl->fl_type = 0;
212 fl->fl_start = fl->fl_end = 0;
213 fl->fl_ops = NULL;
214 fl->fl_lmops = NULL;
215}
216
217EXPORT_SYMBOL(locks_init_lock);
218
219
220
221
222
/* Slab constructor: freshly created file_lock objects start fully reset. */
static void init_once(void *foo)
{
	locks_init_lock((struct file_lock *)foo);
}
229
230static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231{
232 if (fl->fl_ops) {
233 if (fl->fl_ops->fl_copy_lock)
234 fl->fl_ops->fl_copy_lock(new, fl);
235 new->fl_ops = fl->fl_ops;
236 }
237 if (fl->fl_lmops)
238 new->fl_lmops = fl->fl_lmops;
239}
240
241
242
243
244void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
245{
246 new->fl_owner = fl->fl_owner;
247 new->fl_pid = fl->fl_pid;
248 new->fl_file = NULL;
249 new->fl_flags = fl->fl_flags;
250 new->fl_type = fl->fl_type;
251 new->fl_start = fl->fl_start;
252 new->fl_end = fl->fl_end;
253 new->fl_ops = NULL;
254 new->fl_lmops = NULL;
255}
256EXPORT_SYMBOL(__locks_copy_lock);
257
/*
 * locks_copy_lock - clone @fl into @new, including private state
 *
 * Private state already held by @new is released first; the copy then
 * takes over the file pointer and callback tables, and the filesystem's
 * fl_copy_lock hook may duplicate per-lock data.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);
271
272static inline int flock_translate_cmd(int cmd) {
273 if (cmd & LOCK_MAND)
274 return cmd & (LOCK_MAND | LOCK_RW);
275 switch (cmd) {
276 case LOCK_SH:
277 return F_RDLCK;
278 case LOCK_EX:
279 return F_WRLCK;
280 case LOCK_UN:
281 return F_UNLCK;
282 }
283 return -EINVAL;
284}
285
286
/*
 * Allocate and fill a file_lock describing a flock() request on @filp.
 * Returns 0 with the new lock stored in *@lock, or a negative errno for
 * an invalid command or allocation failure.
 */
static int flock_make_lock(struct file *filp, struct file_lock **lock,
		unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;	/* flock locks cover the whole file */

	*lock = fl;
	return 0;
}
308
309static int assign_type(struct file_lock *fl, int type)
310{
311 switch (type) {
312 case F_RDLCK:
313 case F_WRLCK:
314 case F_UNLCK:
315 fl->fl_type = type;
316 break;
317 default:
318 return -EINVAL;
319 }
320 return 0;
321}
322
323
324
325
/*
 * flock_to_posix_lock - convert a userspace struct flock into a file_lock
 *
 * Resolves l_whence against the file position or size, then translates
 * the (l_start, l_len) pair into an inclusive [fl_start, fl_end] range:
 * l_len > 0 extends forward from the start, l_len < 0 extends backward
 * from it, and l_len == 0 means "to end of file" (fl_end = OFFSET_MAX).
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* Ranges that resolve to before byte 0 are rejected. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		/* Negative length: the locked region ends just before
		 * the resolved start and begins l_len bytes earlier. */
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
374
#if BITS_PER_LONG == 32
/*
 * 64-bit flock64 variant of flock_to_posix_lock(), used on 32-bit
 * kernels for the fcntl64 interface.  Same range semantics: l_len > 0
 * extends forward, l_len < 0 backward, 0 means "to end of file".
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	/* Inlined type check (cf. assign_type()). */
	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return (0);
}
#endif
431
432
/* Default lease-break notifier: deliver SIGIO to the lease holder. */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
437
/*
 * Lease teardown: drop the file's SIGIO ownership and reset the signal
 * number that was set up when the lease was installed.
 */
static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}
446
/* Lock-manager operations used by generic fcntl(F_SETLEASE) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_change = lease_modify,
};
452
453
454
455
456static int lease_init(struct file *filp, int type, struct file_lock *fl)
457 {
458 if (assign_type(fl, type) != 0)
459 return -EINVAL;
460
461 fl->fl_owner = current->files;
462 fl->fl_pid = current->tgid;
463
464 fl->fl_file = filp;
465 fl->fl_flags = FL_LEASE;
466 fl->fl_start = 0;
467 fl->fl_end = OFFSET_MAX;
468 fl->fl_ops = NULL;
469 fl->fl_lmops = &lease_manager_ops;
470 return 0;
471}
472
473
474static struct file_lock *lease_alloc(struct file *filp, int type)
475{
476 struct file_lock *fl = locks_alloc_lock();
477 int error = -ENOMEM;
478
479 if (fl == NULL)
480 return ERR_PTR(error);
481
482 error = lease_init(filp, type, fl);
483 if (error) {
484 locks_free_lock(fl);
485 return ERR_PTR(error);
486 }
487 return fl;
488}
489
490
491
492static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
493{
494 return ((fl1->fl_end >= fl2->fl_start) &&
495 (fl2->fl_end >= fl1->fl_start));
496}
497
498
499
500
/*
 * Two POSIX locks share an owner if a lock-manager comparison hook says
 * so (both locks must use the same manager), or, by default, if they
 * reference the same files_struct.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
508
509
510
511
/*
 * Unlink @waiter from its blocker's wait list and from the global
 * blocked_list, and clear the lock it was waiting on.
 * Caller holds file_lock_lock.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}
518
519
520
/* Locked wrapper around __locks_delete_block(). */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
527
528
529
530
531
532
/*
 * Queue @waiter behind @blocker.  Waiters on POSIX locks additionally go
 * on the global blocked_list, which posix_locks_deadlock() walks when
 * looking for wait cycles.  Caller holds file_lock_lock.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
542
543
544
545
546
/*
 * Wake every waiter queued on @blocker.  Each waiter is unlinked first,
 * so a woken task observes fl_next == NULL.  Lock managers receive their
 * fl_notify callback instead of a plain wakeup.
 * Caller holds file_lock_lock.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
561
562
563
564
/*
 * Link @fl into the per-inode chain at position @pos and onto the global
 * file_lock_list, recording a reference to the inserting task's tgid pid.
 * Caller holds file_lock_lock.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* Splice into the singly linked i_flock chain. */
	fl->fl_next = *pos;
	*pos = fl;
}
575
576
577
578
579
580
581
/*
 * Unlink and free the lock at *@thisfl_p: detach it from the per-inode
 * chain and the global list, drop any fasync and pid references, wake
 * its waiters, and release the memory.  Caller holds file_lock_lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	/* Remove any fasync entries; a leftover entry is a bug. */
	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
604
605
606
607
608static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
609{
610 if (sys_fl->fl_type == F_WRLCK)
611 return 1;
612 if (caller_fl->fl_type == F_WRLCK)
613 return 1;
614 return 0;
615}
616
617
618
619
/*
 * POSIX conflict test: only POSIX locks held by a different owner can
 * conflict, and then only when the byte ranges overlap.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;
	return locks_conflict(caller_fl, sys_fl);
}
634
635
636
637
638static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
639{
640
641
642
643 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
644 return (0);
645 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
646 return 0;
647
648 return (locks_conflict(caller_fl, sys_fl));
649}
650
/*
 * posix_test_lock - look for a POSIX lock conflicting with @fl
 *
 * On return @fl either describes the first conflicting lock found (with
 * its pid translated into the holder's namespace when recorded), or has
 * fl_type == F_UNLCK when there is no conflict.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
/* Bound on the deadlock-detection walk; chains longer than this are
 * conservatively treated as "no deadlock". */
#define MAX_DEADLK_ITERATIONS 10

/* Find the lock that the owner of @block_fl is itself blocked on, if
 * any.  Caller holds file_lock_lock (protects blocked_list). */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
712
/*
 * Would blocking @caller_fl behind @block_fl create a wait cycle?
 * Follows the waits-for chain up to MAX_DEADLK_ITERATIONS links and
 * reports a deadlock if it leads back to the caller's owner.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* give up: assume no deadlock */
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
726
727
728
729
730
731
732
733
/*
 * Apply a BSD-style flock() request to @filp's inode.  An inode holds at
 * most one flock lock per struct file; changing the held type removes
 * the old lock first, so an upgrade/downgrade is not atomic.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	/* Pre-allocate outside the spinlock; only needed if we may insert. */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* Look for a lock we already hold through this file (flock locks
	 * precede POSIX locks on the chain, so stop at the first one). */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* already hold the requested type */
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If a lock was removed for conversion, drop the spinlock briefly
	 * to give other waiters a chance to grab the file.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		/* Queue behind the conflicting holder; caller must wait. */
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;	/* probe only; nothing to insert */
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
812
/*
 * Apply POSIX lock @request to @inode's lock list.
 *
 * Conflicts with other owners yield -EAGAIN (or queueing with
 * FILE_LOCK_DEFERRED when FL_SLEEP is set, after deadlock detection).
 * Otherwise the request is merged into this owner's existing locks,
 * coalescing, splitting or deleting regions as needed.  When @conflock
 * is non-NULL it receives a copy of the first conflicting lock.
 */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need up to two file_lock structures (one for a new lock,
	 * one for splitting an existing lock in two), so allocate them in
	 * advance, outside the spinlock.  A whole-file unlock never needs
	 * an allocation.  Allocation failure is reported later, and only
	 * if the locks actually turn out to be required.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		/* Check for conflicts with locks of other owners. */
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* FL_ACCESS only asks "would this succeed", so we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Skip ahead to the first POSIX lock owned by this owner. */
	before = &inode->i_flock;

	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Walk this owner's locks, merging and splitting around @request. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		if (request->fl_type == fl->fl_type) {
			/*
			 * Same type: coalesce adjacent or overlapping
			 * regions into a single lock.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			if (fl->fl_start - 1 > request->fl_end)
				break;	/* past the request; chain is sorted */

			/* Grow the pair to their union, and reuse the
			 * existing lock as the request from here on. */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Different type: replace the overlapped portion. */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;	/* unlock never inserts */
			if (fl->fl_start < request->fl_start)
				left = fl;	/* keeps a leading remainder */
			/* An old lock extending past the request keeps a
			 * trailing remainder; nothing later can overlap. */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The request fully covers this old lock. */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock in place with the new
				 * one, waking anybody waiting on it since
				 * the type change might satisfy them. */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}

		/* Go on to the next lock. */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * Everything above only modified existing locks; all insertions
	 * happen below, so it is still safe to bail out here if the
	 * split allocation is missing.
	 */
	error = -ENOLCK;
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The request breaks one old lock into two pieces,
			 * so the second piece needs the spare structure. */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/* Free whichever pre-allocated locks went unused. */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 * posix_lock_file - apply a POSIX-style lock to a file
 * @filp: file to lock
 * @fl: the lock request
 * @conflock: receives a copy of the conflicting lock, if any (may be NULL)
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 * posix_lock_file_wait - blocking version of posix_lock_file()
 *
 * Retries while the request is deferred, sleeping until the blocking
 * lock goes away (fl_next becomes NULL).  On an interrupted sleep the
 * waiter is dequeued and the wait error is returned.
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1063
1064
1065
1066
1067
1068
1069
1070
/**
 * locks_mandatory_locked - does mandatory locking forbid access?
 * @inode: inode being accessed
 *
 * Returns -EAGAIN if any POSIX lock owned by another process exists on
 * the inode, 0 otherwise.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/* Search for a POSIX lock held by someone other than ourselves. */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
/**
 * locks_mandatory_area - test I/O against mandatory locks on a region
 * @read_write: FLOCK_VERIFY_WRITE or FLOCK_VERIFY_READ
 * @inode: inode being accessed
 * @filp: file through which access happens (may be NULL)
 * @offset: start of the I/O range
 * @count: length of the I/O range
 *
 * Probes the range with an FL_ACCESS lock (nothing is ever inserted);
 * unless the file is O_NONBLOCK, sleeps until conflicting locks clear.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * While sleeping the mode bits may have changed
			 * behind our back; only retry while the inode
			 * still has mandatory locking enabled.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1143
1144
/*
 * lease_modify - change the type of the lease at *@before
 *
 * Wakes anyone blocked on the lease; F_UNLCK removes (and frees) it
 * entirely.  Returns -EINVAL for an invalid target type.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1159
/*
 * Downgrade or remove any lease on @inode whose break timeout has
 * expired.  The walk covers only leading leases that are mid-break
 * (F_INPROGRESS); fl_break_time == 0 means "no timeout".
 * Caller holds file_lock_lock.
 */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		/* Only advance if lease_modify() left the entry in place
		 * (F_UNLCK would have unlinked and freed it). */
		if (fl == *before)
			before = &fl->fl_next;
	}
}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
/**
 * __break_lease - revoke all outstanding leases on an inode
 * @inode: the inode of the file being opened
 * @mode: the open mode (O_RDONLY/O_WRONLY/O_RDWR, plus O_NONBLOCK)
 *
 * Notifies every lease holder whose lease conflicts with the open, then
 * (unless O_NONBLOCK is set or we hold a lease ourselves) waits, up to
 * lease_break_time seconds per round, for the leases to be surrendered.
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_flocks();

	time_out_leases(inode);

	/* Leases sort first on i_flock; no lease means nothing to break. */
	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	/* Work out what the leases must become. */
	if (want_write) {
		/* A writer must revoke every lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* A break is already under way; keep its target. */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* A reader only needs write leases downgraded to read. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* Read leases do not conflict with a read-only open. */
		goto out;
	}

	/* A failed lease_alloc() only matters if we would have to wait. */
	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* 0 is reserved for "no timeout" */
	}

	/* Mark each lease and notify holders that have not been told. */
	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	/* Convert the absolute deadline to a relative timeout. */
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	/* Block behind the first lease until it is released or times out. */
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);	/* timeout: force the break */
		/* Wait again if any lease is still mid-break. */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295void lease_get_mtime(struct inode *inode, struct timespec *time)
1296{
1297 struct file_lock *flock = inode->i_flock;
1298 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1299 *time = current_fs_time(inode->i_sb);
1300 else
1301 *time = inode->i_mtime;
1302}
1303
1304EXPORT_SYMBOL(lease_get_mtime);
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
/**
 * fcntl_getlease - fcntl(F_GETLEASE): report the lease held on a file
 * @filp: the file
 *
 * Returns the type of lease held through @filp (with the in-progress
 * break flag masked off), or F_UNLCK if none.  Expired breaks are
 * resolved first via time_out_leases().
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_flocks();
	return type;
}
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/**
 * generic_setlease - set or remove a lease on a file (default method)
 * @filp: file on which to act
 * @arg: F_RDLCK, F_WRLCK or F_UNLCK
 * @flp: in: the prepared lease; out: the lease actually in effect
 *
 * Caller must hold file_lock_lock.  On success with insertion, the lease
 * in *@flp is taken over by the lock list; when an existing lease is
 * modified instead, *@flp is redirected to it and the caller keeps
 * ownership of the lease it passed in.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	lease = *flp;

	/* Only the file's owner, or a CAP_LEASE-privileged task, may
	 * lease it, and only regular files can carry leases. */
	error = -EACCES;
	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		goto out;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto out;
	error = security_file_lock(filp, arg);
	if (error)
		goto out;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->fl_break);

	if (arg != F_UNLCK) {
		/*
		 * A read lease is refused while the file is open for
		 * write; a write lease requires that no one else holds
		 * the dentry or inode at all.
		 */
		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((dentry->d_count > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * Walk the existing leases: note one we already hold through this
	 * struct file, and count the others.  A lease being broken down
	 * to F_UNLCK (type F_INPROGRESS|F_UNLCK) counts on the write side.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		/* Modify the lease we already hold, in place. */
		error = lease->fl_lmops->fl_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	if (arg == F_UNLCK)
		goto out;	/* no lease held through @filp; error is -EAGAIN */

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
EXPORT_SYMBOL(generic_setlease);
1441
1442static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1443{
1444 if (filp->f_op && filp->f_op->setlease)
1445 return filp->f_op->setlease(filp, arg, lease);
1446 else
1447 return generic_setlease(filp, arg, lease);
1448}
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
/**
 * vfs_setlease - set or remove a lease, taking the lock-table lock
 * @filp: the file
 * @arg: F_RDLCK, F_WRLCK or F_UNLCK
 * @lease: in/out lease pointer, as for generic_setlease()
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1488
/*
 * Remove the lease held through @filp.  The request descriptor lives on
 * the stack; that is safe because generic_setlease() never inserts an
 * F_UNLCK request into the lock list.
 */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1497
/*
 * Install a lease on @filp and wire up fasync/SIGIO delivery so the
 * holder can be notified when the lease is broken.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	/* Pre-allocate the fasync entry before taking the spinlock. */
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* ret may point at an existing lease that was modified instead,
	 * in which case our freshly allocated lock is no longer needed. */
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the pre-existing entry when one is
	 * already present; then our pre-allocated entry is freed below.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1553{
1554 if (arg == F_UNLCK)
1555 return do_fcntl_delete_lease(filp);
1556 return do_fcntl_add_lease(fd, filp, arg);
1557}
1558
1559
1560
1561
1562
1563
1564
1565
/**
 * flock_lock_file_wait - blocking wrapper around flock_lock_file()
 *
 * Retries while the request is deferred, sleeping until the conflicting
 * lock is released; an interrupted sleep dequeues the waiter and aborts.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
/**
 * sys_flock: - flock() system call.
 * @fd: file descriptor to lock
 * @cmd: LOCK_SH, LOCK_EX or LOCK_UN, optionally ORed with LOCK_NB
 *
 * Applies an advisory whole-file lock.  LOCK_NB makes the call fail
 * instead of sleeping on a conflict.  Filesystems providing their own
 * ->flock method (e.g. network filesystems) handle the request first.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* Taking a lock requires the file be open for read or write
	 * (LOCK_MAND requests are exempt); otherwise return -EBADF. */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
1650
1651
1652
1653
1654
1655
1656
1657
1658
/**
 * vfs_test_lock - probe for a conflicting POSIX lock
 * @filp: the file
 * @fl: in: the request; out: the conflicting lock, or fl_type == F_UNLCK
 *
 * Uses the filesystem's ->lock method when present, otherwise the
 * generic local lock table.
 */
int vfs_test_lock(struct file *filp, struct file_lock *fl)
{
	if (filp->f_op && filp->f_op->lock)
		return filp->f_op->lock(filp, F_GETLK, fl);
	posix_test_lock(filp, fl);
	return 0;
}
EXPORT_SYMBOL_GPL(vfs_test_lock);
1667
/*
 * Convert an internal file_lock back into a userspace struct flock.
 * Returns -EOVERFLOW when the range cannot be represented by the legacy
 * 32-bit off_t interface.
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/* Make sure the lock range fits in a 32-bit flock. */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* l_len == 0 encodes "to end of file". */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
1688
#if BITS_PER_LONG == 32
/* 64-bit flock64 variant of posix_lock_to_flock(); cannot overflow. */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	/* l_len == 0 encodes "to end of file". */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
1700
1701
1702
1703
/* Report the first lock which blocks the F_GETLK request described by @l. */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	/* F_UNLCK in l_type signals "no conflicting lock" to userspace. */
	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1772{
1773 if (filp->f_op && filp->f_op->lock)
1774 return filp->f_op->lock(filp, cmd, fl);
1775 else
1776 return posix_lock_file(filp, fl, conf);
1777}
1778EXPORT_SYMBOL_GPL(vfs_lock_file);
1779
/* Acquire the lock @fl on @filp, sleeping as long as the request keeps
 * being deferred.  Returns 0 on success, or a negative error (including
 * -ERESTARTSYS when interrupted by a signal).
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	/* Give the security module a chance to veto the operation. */
	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Deferred: sleep until we are no longer on a blocked list. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Signal arrived while blocked: unlink and bail out. */
		locks_delete_block(fl);
		break;
	}

	return error;
}
1803
1804
1805
1806
1807int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1808 struct flock __user *l)
1809{
1810 struct file_lock *file_lock = locks_alloc_lock();
1811 struct flock flock;
1812 struct inode *inode;
1813 struct file *f;
1814 int error;
1815
1816 if (file_lock == NULL)
1817 return -ENOLCK;
1818
1819
1820
1821
1822 error = -EFAULT;
1823 if (copy_from_user(&flock, l, sizeof(flock)))
1824 goto out;
1825
1826 inode = filp->f_path.dentry->d_inode;
1827
1828
1829
1830
1831 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1832 error = -EAGAIN;
1833 goto out;
1834 }
1835
1836again:
1837 error = flock_to_posix_lock(filp, file_lock, &flock);
1838 if (error)
1839 goto out;
1840 if (cmd == F_SETLKW) {
1841 file_lock->fl_flags |= FL_SLEEP;
1842 }
1843
1844 error = -EBADF;
1845 switch (flock.l_type) {
1846 case F_RDLCK:
1847 if (!(filp->f_mode & FMODE_READ))
1848 goto out;
1849 break;
1850 case F_WRLCK:
1851 if (!(filp->f_mode & FMODE_WRITE))
1852 goto out;
1853 break;
1854 case F_UNLCK:
1855 break;
1856 default:
1857 error = -EINVAL;
1858 goto out;
1859 }
1860
1861 error = do_lock_file_wait(filp, cmd, file_lock);
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872 spin_lock(¤t->files->file_lock);
1873 f = fcheck(fd);
1874 spin_unlock(¤t->files->file_lock);
1875 if (!error && f != filp && flock.l_type != F_UNLCK) {
1876 flock.l_type = F_UNLCK;
1877 goto again;
1878 }
1879
1880out:
1881 locks_free_lock(file_lock);
1882 return error;
1883}
1884
1885#if BITS_PER_LONG == 32
1886
1887
1888
/* Report the first lock that conflicts with the description in @l, if
 * any.  64-bit (struct flock64) flavour of fcntl_getlk(), compiled only
 * on 32-bit kernels.
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	/* Only read or write locks may be probed for. */
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		/* Cannot overflow: flock64 fields are 64 bits wide. */
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}
1921
1922
1923
1924
1925int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1926 struct flock64 __user *l)
1927{
1928 struct file_lock *file_lock = locks_alloc_lock();
1929 struct flock64 flock;
1930 struct inode *inode;
1931 struct file *f;
1932 int error;
1933
1934 if (file_lock == NULL)
1935 return -ENOLCK;
1936
1937
1938
1939
1940 error = -EFAULT;
1941 if (copy_from_user(&flock, l, sizeof(flock)))
1942 goto out;
1943
1944 inode = filp->f_path.dentry->d_inode;
1945
1946
1947
1948
1949 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1950 error = -EAGAIN;
1951 goto out;
1952 }
1953
1954again:
1955 error = flock64_to_posix_lock(filp, file_lock, &flock);
1956 if (error)
1957 goto out;
1958 if (cmd == F_SETLKW64) {
1959 file_lock->fl_flags |= FL_SLEEP;
1960 }
1961
1962 error = -EBADF;
1963 switch (flock.l_type) {
1964 case F_RDLCK:
1965 if (!(filp->f_mode & FMODE_READ))
1966 goto out;
1967 break;
1968 case F_WRLCK:
1969 if (!(filp->f_mode & FMODE_WRITE))
1970 goto out;
1971 break;
1972 case F_UNLCK:
1973 break;
1974 default:
1975 error = -EINVAL;
1976 goto out;
1977 }
1978
1979 error = do_lock_file_wait(filp, cmd, file_lock);
1980
1981
1982
1983
1984
1985 spin_lock(¤t->files->file_lock);
1986 f = fcheck(fd);
1987 spin_unlock(¤t->files->file_lock);
1988 if (!error && f != filp && flock.l_type != F_UNLCK) {
1989 flock.l_type = F_UNLCK;
1990 goto again;
1991 }
1992
1993out:
1994 locks_free_lock(file_lock);
1995 return error;
1996}
1997#endif
1998
1999
2000
2001
2002
2003
/*
 * Remove all POSIX locks held by @owner on @filp, by issuing a single
 * F_UNLCK over the whole byte range.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to go
	 * further.  Another process could be setting a lock on this file
	 * at the same time, but we wouldn't remove that lock anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	/* An F_UNLCK over [0, OFFSET_MAX] releases every lock of @owner. */
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* The filesystem may have attached private state; release it. */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);
2033
2034
2035
2036
/*
 * Remove any flock-style locks and leases that @filp still holds on its
 * inode, giving the filesystem's own ->flock() method a chance to drop
 * its state first.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	if (filp->f_op && filp->f_op->flock) {
		/* Tell the filesystem to release its own flock state. */
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	/* Walk the inode's lock list, unlinking entries owned by @filp. */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* Any other lock type left on @filp here is a bug. */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}
2079
2080
2081
2082
2083
2084
2085
2086
2087int
2088posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2089{
2090 int status = 0;
2091
2092 lock_flocks();
2093 if (waiter->fl_next)
2094 __locks_delete_block(waiter);
2095 else
2096 status = -ENOENT;
2097 unlock_flocks();
2098 return status;
2099}
2100
2101EXPORT_SYMBOL(posix_unblock_lock);
2102
2103
2104
2105
2106
2107
2108
2109
2110int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2111{
2112 if (filp->f_op && filp->f_op->lock)
2113 return filp->f_op->lock(filp, F_CANCELLK, fl);
2114 return 0;
2115}
2116
2117EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2118
2119#ifdef CONFIG_PROC_FS
2120#include <linux/proc_fs.h>
2121#include <linux/seq_file.h>
2122
/*
 * Emit one line of /proc/locks for @fl.  The output format is parsed by
 * userspace tools, so the field widths and strings must not change.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* Report the pid as seen from the reader's pid namespace. */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	/* "<id>:<prefix>" — blocked waiters are printed with " ->". */
	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK MSNFS ");
		} else {
			seq_printf(f, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE ");
		if (fl->fl_type & F_INPROGRESS)
			seq_printf(f, "BREAKING ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE ");
		else
			seq_printf(f, "BREAKER ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN ");
	}
	/* Access-mode column: LOCK_MAND locks use read/write bit pairs. */
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (fl->fl_type & F_INPROGRESS)
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2193
2194static int locks_show(struct seq_file *f, void *v)
2195{
2196 struct file_lock *fl, *bfl;
2197
2198 fl = list_entry(v, struct file_lock, fl_link);
2199
2200 lock_get_status(f, fl, *((loff_t *)f->private), "");
2201
2202 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2203 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2204
2205 return 0;
2206}
2207
/* seq_file ->start(): take the global lock for the whole walk and record
 * the 1-based id that lock_get_status() prints for each entry.
 */
static void *locks_start(struct seq_file *f, loff_t *pos)
{
	loff_t *p = f->private;

	/* Held until locks_stop(); protects file_lock_list. */
	lock_flocks();
	*p = (*pos + 1);
	return seq_list_start(&file_lock_list, *pos);
}
2216
2217static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2218{
2219 loff_t *p = f->private;
2220 ++*p;
2221 return seq_list_next(v, &file_lock_list, pos);
2222}
2223
/* seq_file ->stop(): release the global lock taken in locks_start(). */
static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}
2228
/* seq_file iterator over the global file_lock_list for /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start = locks_start,
	.next = locks_next,
	.stop = locks_stop,
	.show = locks_show,
};
2235
/* open() for /proc/locks: the loff_t of private state carries the
 * 1-based entry id maintained by locks_start()/locks_next().
 */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
2240
/* File operations for /proc/locks; standard seq_file read-side glue. */
static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2247
/* Register /proc/locks at boot; a failure to create it is not fatal. */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
2254#endif
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2270{
2271 struct file_lock *fl;
2272 int result = 1;
2273 lock_flocks();
2274 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2275 if (IS_POSIX(fl)) {
2276 if (fl->fl_type == F_RDLCK)
2277 continue;
2278 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2279 continue;
2280 } else if (IS_FLOCK(fl)) {
2281 if (!(fl->fl_type & LOCK_MAND))
2282 continue;
2283 if (fl->fl_type & LOCK_READ)
2284 continue;
2285 } else
2286 continue;
2287 result = 0;
2288 break;
2289 }
2290 unlock_flocks();
2291 return result;
2292}
2293
2294EXPORT_SYMBOL(lock_may_read);
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2310{
2311 struct file_lock *fl;
2312 int result = 1;
2313 lock_flocks();
2314 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2315 if (IS_POSIX(fl)) {
2316 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2317 continue;
2318 } else if (IS_FLOCK(fl)) {
2319 if (!(fl->fl_type & LOCK_MAND))
2320 continue;
2321 if (fl->fl_type & LOCK_WRITE)
2322 continue;
2323 } else
2324 continue;
2325 result = 0;
2326 break;
2327 }
2328 unlock_flocks();
2329 return result;
2330}
2331
2332EXPORT_SYMBOL(lock_may_write);
2333
/* Create the slab cache used by locks_alloc_lock().  SLAB_PANIC makes a
 * creation failure fatal at boot; init_once (defined elsewhere in this
 * file) is passed as the object constructor.
 */
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC,
			init_once);
	return 0;
}

core_initcall(filelock_init);
2343