1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
/* Predicates on a lock's class, derived from fl_flags. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

/* Lease tunables -- presumably exported via sysctl; TODO confirm wiring. */
int leases_enable = 1;
int lease_break_time = 45;

/* Walk every lock on an inode via the address of each fl_next link,
 * so the current entry can be unlinked in place during the walk. */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/* Global list of all active locks, global list of blocked POSIX waiters
 * (used for deadlock detection), and the single spinlock protecting all
 * file-lock state in this file. */
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);
145
146
147
148
/*
 * Take the global spinlock protecting all file-lock state
 * (file_lock_list, blocked_list and every inode's i_flock chain).
 */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
154
/* Release the global file-lock spinlock taken by lock_flocks(). */
void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
160
/* Slab cache for struct file_lock objects; presumably created with
 * init_once() as its constructor (creation site not in view -- confirm). */
static struct kmem_cache *filelock_cache __read_mostly;

/* Allocate an empty lock structure (may sleep; GFP_KERNEL). */
struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
169
170void locks_release_private(struct file_lock *fl)
171{
172 if (fl->fl_ops) {
173 if (fl->fl_ops->fl_release_private)
174 fl->fl_ops->fl_release_private(fl);
175 fl->fl_ops = NULL;
176 }
177 if (fl->fl_lmops) {
178 if (fl->fl_lmops->fl_release_private)
179 fl->fl_lmops->fl_release_private(fl);
180 fl->fl_lmops = NULL;
181 }
182
183}
184EXPORT_SYMBOL_GPL(locks_release_private);
185
186
/*
 * Free a lock that is no longer linked anywhere.  The BUG_ONs assert
 * that nobody is sleeping on the lock and that it is off both the
 * blocked chain and the global lock list before it is returned to the
 * slab cache.
 */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
197
/*
 * Initialise a file_lock to a clean, unlinked state.  Also used as the
 * body of the slab constructor (init_once), so it must fully initialise
 * every field it touches from scratch.
 */
void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_next = NULL;
	fl->fl_fasync = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid = 0;
	fl->fl_nspid = NULL;
	fl->fl_file = NULL;
	fl->fl_flags = 0;
	fl->fl_type = 0;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;
}

EXPORT_SYMBOL(locks_init_lock);
217
218
219
220
221
/*
 * Slab constructor: runs once per object when a fresh slab page is
 * built; simply puts the lock into its clean initial state.
 */
static void init_once(void *foo)
{
	locks_init_lock(foo);
}
228
229static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
230{
231 if (fl->fl_ops) {
232 if (fl->fl_ops->fl_copy_lock)
233 fl->fl_ops->fl_copy_lock(new, fl);
234 new->fl_ops = fl->fl_ops;
235 }
236 if (fl->fl_lmops)
237 new->fl_lmops = fl->fl_lmops;
238}
239
240
241
242
/*
 * Copy only the lock-describing fields of @fl into @new.  fl_file,
 * fl_ops and fl_lmops are deliberately cleared so that @new does not
 * alias private state it never owned (use locks_copy_lock() for a full
 * copy including private state).
 */
void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_ops = NULL;
	new->fl_lmops = NULL;
}
EXPORT_SYMBOL(__locks_copy_lock);
256
/*
 * Full copy of @fl into @new, including the file pointer and the
 * fs/lock-manager private state (duplicated via the owner's
 * fl_copy_lock hook).  Any private state previously attached to @new
 * is released first.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);
270
271static inline int flock_translate_cmd(int cmd) {
272 if (cmd & LOCK_MAND)
273 return cmd & (LOCK_MAND | LOCK_RW);
274 switch (cmd) {
275 case LOCK_SH:
276 return F_RDLCK;
277 case LOCK_EX:
278 return F_WRLCK;
279 case LOCK_UN:
280 return F_UNLCK;
281 }
282 return -EINVAL;
283}
284
285
286static int flock_make_lock(struct file *filp, struct file_lock **lock,
287 unsigned int cmd)
288{
289 struct file_lock *fl;
290 int type = flock_translate_cmd(cmd);
291 if (type < 0)
292 return type;
293
294 fl = locks_alloc_lock();
295 if (fl == NULL)
296 return -ENOMEM;
297
298 fl->fl_file = filp;
299 fl->fl_pid = current->tgid;
300 fl->fl_flags = FL_FLOCK;
301 fl->fl_type = type;
302 fl->fl_end = OFFSET_MAX;
303
304 *lock = fl;
305 return 0;
306}
307
308static int assign_type(struct file_lock *fl, int type)
309{
310 switch (type) {
311 case F_RDLCK:
312 case F_WRLCK:
313 case F_UNLCK:
314 fl->fl_type = type;
315 break;
316 default:
317 return -EINVAL;
318 }
319 return 0;
320}
321
322
323
324
325static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
326 struct flock *l)
327{
328 off_t start, end;
329
330 switch (l->l_whence) {
331 case SEEK_SET:
332 start = 0;
333 break;
334 case SEEK_CUR:
335 start = filp->f_pos;
336 break;
337 case SEEK_END:
338 start = i_size_read(filp->f_path.dentry->d_inode);
339 break;
340 default:
341 return -EINVAL;
342 }
343
344
345
346 start += l->l_start;
347 if (start < 0)
348 return -EINVAL;
349 fl->fl_end = OFFSET_MAX;
350 if (l->l_len > 0) {
351 end = start + l->l_len - 1;
352 fl->fl_end = end;
353 } else if (l->l_len < 0) {
354 end = start - 1;
355 fl->fl_end = end;
356 start += l->l_len;
357 if (start < 0)
358 return -EINVAL;
359 }
360 fl->fl_start = start;
361 if (fl->fl_end < fl->fl_start)
362 return -EOVERFLOW;
363
364 fl->fl_owner = current->files;
365 fl->fl_pid = current->tgid;
366 fl->fl_file = filp;
367 fl->fl_flags = FL_POSIX;
368 fl->fl_ops = NULL;
369 fl->fl_lmops = NULL;
370
371 return assign_type(fl, l->l_type);
372}
373
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of flock_to_posix_lock() for 32-bit kernels: same
 * translation but from a struct flock64 with loff_t-sized ranges.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* Reject ranges that would overflow loff_t (signed overflow is UB). */
	if (l->l_start > 0 && start > OFFSET_MAX - l->l_start)
		return -EOVERFLOW;
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - start)
			return -EOVERFLOW;
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		/* Negative len: the range is [start + len, start - 1]. */
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
420
421
/* Lease break notification: signal the lease holder via its fasync
 * queue (SIGIO / POLL_MSG). */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}

/* Tear down the signal-delivery ownership attached to the leased file
 * when the lease's private state is released. */
static void lease_release_private_callback(struct file_lock *fl)
{
	if (!fl->fl_file)
		return;

	f_delown(fl->fl_file);
	fl->fl_file->f_owner.signum = 0;
}

/* Lock-manager operations shared by all kernel-internal leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.fl_break = lease_break_callback,
	.fl_release_private = lease_release_private_callback,
	.fl_change = lease_modify,
};
441
442
443
444
445static int lease_init(struct file *filp, int type, struct file_lock *fl)
446 {
447 if (assign_type(fl, type) != 0)
448 return -EINVAL;
449
450 fl->fl_owner = current->files;
451 fl->fl_pid = current->tgid;
452
453 fl->fl_file = filp;
454 fl->fl_flags = FL_LEASE;
455 fl->fl_start = 0;
456 fl->fl_end = OFFSET_MAX;
457 fl->fl_ops = NULL;
458 fl->fl_lmops = &lease_manager_ops;
459 return 0;
460}
461
462
463static struct file_lock *lease_alloc(struct file *filp, int type)
464{
465 struct file_lock *fl = locks_alloc_lock();
466 int error = -ENOMEM;
467
468 if (fl == NULL)
469 return ERR_PTR(error);
470
471 error = lease_init(filp, type, fl);
472 if (error) {
473 locks_free_lock(fl);
474 return ERR_PTR(error);
475 }
476 return fl;
477}
478
479
480
481static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
482{
483 return ((fl1->fl_end >= fl2->fl_start) &&
484 (fl2->fl_end >= fl1->fl_start));
485}
486
487
488
489
/*
 * Two POSIX locks have the same owner if they share a files_struct, or
 * -- when a lock manager (e.g. a remote locking service) provides a
 * fl_compare_owner callback -- if both use the same manager and the
 * callback says so.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
497
498
499
500
/*
 * Remove @waiter from its blocker's block list and from the global
 * blocked_list, and forget which lock it was waiting on.  Caller must
 * hold the file-lock spinlock.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}
507
508
509
/* Locked wrapper around __locks_delete_block(). */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
516
517
518
519
520
521
/*
 * Queue @waiter behind @blocker.  POSIX waiters additionally go onto
 * the global blocked_list, which the deadlock detector walks.  Caller
 * must hold the file-lock spinlock; @waiter must not already be queued.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
531
532
533
534
535
/*
 * Wake up every process blocked on @blocker.  Each waiter is unlinked
 * from the block list *before* it is notified, so a woken task never
 * finds itself still queued.  Lock-manager waiters are notified via
 * their fl_notify callback instead of the wait queue.  Caller must hold
 * the file-lock spinlock.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
			waiter->fl_lmops->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
550
551
552
553
/*
 * Splice @fl into an inode's i_flock chain at position @pos (the
 * address of the fl_next pointer to insert after) and add it to the
 * global file_lock_list.  A reference to the holder's struct pid is
 * taken so the pid can later be translated for other namespaces.
 * Caller must hold the file-lock spinlock.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;
}
564
565
566
567
568
569
570
/*
 * Unlink and free the lock at *thisfl_p: remove it from the inode chain
 * and the global list, drop any fasync notification entry and the pid
 * reference, wake all waiters, then free it.  A fasync entry surviving
 * fasync_helper() indicates a bug and is reported.  Caller must hold
 * the file-lock spinlock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
593
594
595
596
597static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
598{
599 if (sys_fl->fl_type == F_WRLCK)
600 return 1;
601 if (caller_fl->fl_type == F_WRLCK)
602 return 1;
603 return 0;
604}
605
606
607
608
/*
 * Does an existing lock @sys_fl conflict with the POSIX request
 * @caller_fl?  Only overlapping POSIX locks held by a different owner
 * can conflict; the final verdict is the read/write type check.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}
623
624
625
626
627static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
628{
629
630
631
632 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
633 return (0);
634 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
635 return 0;
636
637 return (locks_conflict(caller_fl, sys_fl));
638}
639
/*
 * posix_test_lock - look for a POSIX lock conflicting with @fl.
 * On return @fl either describes the first conflicting lock found
 * (with fl_pid translated via the holder's pid namespace reference
 * when available) or has fl_type set to F_UNLCK if there is none.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
/* Cap on the wait-for chain walk in posix_locks_deadlock(), bounding
 * the work done per blocking request. */
#define MAX_DEADLK_ITERATIONS 10

/* Find what the owner of @block_fl is itself blocked on, by scanning
 * the global blocked_list for a waiter with the same owner.  Returns
 * that waiter's blocker, or NULL if the owner isn't waiting.  Caller
 * must hold the file-lock spinlock. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
701
/*
 * Would the caller deadlock by waiting on @block_fl?  Follow the chain
 * of "owner X waits on owner Y" edges; if it leads back to the caller's
 * own owner, report deadlock (1).  The walk gives up (reporting no
 * deadlock) after MAX_DEADLK_ITERATIONS hops.  Caller must hold the
 * file-lock spinlock.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
715
716
717
718
719
720
721
722
/*
 * Apply an FL_FLOCK-style (whole-file, per-struct-file) lock.  A lock
 * of the type already held via this struct file is a no-op; changing
 * type drops the old lock first, so the switch is not atomic and other
 * lockers may get in between.  FL_ACCESS requests only test for
 * conflicts.  Returns 0, -ENOMEM, -ENOENT (FL_EXISTS unlock miss),
 * -EAGAIN, or FILE_LOCK_DEFERRED when queued behind a conflicting lock.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	/* Pre-allocate outside the spinlock; unlocks need no new lock. */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* Find and remove any existing flock lock held via this filp. */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;		/* flock locks sort before POSIX locks */
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* already held: nothing to do */
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * We dropped an old lock above; briefly release the spinlock and
	 * reschedule so that anyone blocked on it gets a chance to run
	 * before we take the new lock.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;	/* test-only request: don't install anything */
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;	/* ownership transferred to the inode's list */
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
801
/*
 * Core POSIX lock engine: apply @request (lock, unlock or FL_ACCESS
 * test) to @inode, merging with, splitting or replacing the owner's
 * existing locks as required.  If a conflicting lock is found and
 * @conflock is non-NULL, it receives a copy of that lock.  Returns 0,
 * -EAGAIN, -EDEADLK, -ENOLCK, -ENOENT (FL_EXISTS unlock miss) or
 * FILE_LOCK_DEFERRED when the request was queued behind a blocker.
 */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * Allocate everything we might need up front so the list surgery
	 * below never has to back out on allocation failure.  A
	 * whole-file unlock needs no new locks; anything else may need
	 * one new lock (new_fl) plus a second (new_fl2) in case an
	 * existing lock must be split in two.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		/* Scan for a conflicting lock held by another owner. */
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just testing for access, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the position of the first lock with the same owner as the
	 * request; earlier entries (flock locks, leases, other owners'
	 * POSIX locks) are skipped.
	 */
	before = &inode->i_flock;

	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process this owner's existing locks against the request. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		if (request->fl_type == fl->fl_type) {
			/*
			 * Same type: coalesce.  Adjacent (touching)
			 * ranges count as overlapping here.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;	/* entirely before request */
			if (fl->fl_start - 1 > request->fl_end)
				break;		/* entirely after request */

			/*
			 * Grow the existing lock (and the request) to the
			 * union of both ranges, then continue scanning
			 * with the merged lock as the request.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				/* already merged once: this one is redundant */
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Different type: replace the overlapped portion. */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;	/* unlock never inserts a lock */
			if (fl->fl_start < request->fl_start)
				left = fl;	/* piece remains on the left */
			/*
			 * A piece remains on the right: no later lock can
			 * overlap the request, so stop scanning.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* fl is wholly inside the request range */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/*
				 * Reuse fl in place for the new lock:
				 * wake its waiters (the lock is changing)
				 * and take over its range, type and
				 * private state.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}

		/* Advance to the owner's next lock. */
		next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The only situation needing both spare locks at once is
	 * splitting a single existing lock in two (left == right); fail
	 * cleanly if the earlier allocation could not provide them.
	 */
	error = -ENOLCK;
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;	/* consumed */
	}
	if (right) {
		if (left == right) {
			/*
			 * Splitting one lock: the left remainder becomes a
			 * copy made from the second spare lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;	/* consumed */
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/*
	 * Free any unused pre-allocated locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
/*
 * posix_lock_file - apply a POSIX lock to the file's inode.  If a
 * conflicting lock prevents the operation and @conflock is non-NULL,
 * @conflock receives a copy of the conflicting lock.  Thin wrapper
 * around __posix_lock_file().
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
/*
 * posix_lock_file_wait - blocking variant of posix_lock_file().
 * Retries until the lock is granted, an error occurs, or the sleep is
 * interrupted (in which case the waiter is dequeued and the
 * -ERESTARTSYS/-EINTR style error is returned).
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Queued: sleep until granted (fl_next cleared) or a signal. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1052
1053
1054
1055
1056
1057
1058
1059
/*
 * locks_mandatory_locked - mandatory-locking check used before mmap:
 * does any *other* owner hold a POSIX lock on @inode?  Returns -EAGAIN
 * if so, 0 otherwise.  The scan can stop at the first POSIX lock with a
 * foreign owner -- that lock is, by itself, the conflict.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;	/* someone else's lock: conflict */
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
/*
 * locks_mandatory_area - mandatory-locking check for an I/O region.
 * Tests (and, unless the file is O_NONBLOCK, waits out) conflicting
 * POSIX locks covering @count bytes at @offset before a read
 * (FLOCK_VERIFY_READ) or write (FLOCK_VERIFY_WRITE) may proceed.  The
 * FL_ACCESS flag makes __posix_lock_file() test only, never install.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1132
1133
/*
 * lease_modify - change the type of the lease at *before to @arg
 * (F_RDLCK/F_WRLCK/F_UNLCK), waking any blocked processes.  F_UNLCK
 * removes and frees the lease entirely.  Caller must hold the
 * file-lock spinlock.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1148
/*
 * Expire leases whose break deadline has passed: each timed-out lease
 * still marked F_INPROGRESS is forced to its target type (which may
 * unlink and free it).  A zero fl_break_time means "no deadline".
 * Caller must hold the file-lock spinlock.
 */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have removed fl */
			before = &fl->fl_next;
	}
}
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
/*
 * __break_lease - revoke all outstanding leases on @inode before an
 * open with the given @mode.  Write opens break leases completely;
 * read opens only downgrade write leases.  Unless O_NONBLOCK is set,
 * the caller sleeps until the holders give up their leases or the
 * lease_break_time deadline expires.  Returns 0, -EWOULDBLOCK for
 * non-blocking callers (and lease holders themselves), or a negative
 * error from allocation or an interrupted sleep.
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	/* May be an ERR_PTR; only fatal if we would actually have to wait. */
	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_flocks();

	time_out_leases(inode);

	/* Leases sort first on i_flock; nothing to do if there are none. */
	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (want_write) {
		/* A writer breaks the lease entirely. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* A break is already in progress; join it as-is. */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* A reader only forces a downgrade to a read lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* A read lease does not conflict with a reader. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* 0 means "no deadline" -- avoid it */
	}

	/* Notify every holder whose lease type must change. */
	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* lease must have lmops break callback */
			fl->fl_lmops->fl_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);	/* timed out: force the break */
		/*
		 * Wait again if other leases on this inode are still
		 * mid-break.
		 */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
/*
 * lease_get_mtime - the mtime a stat-like caller should report.  If the
 * first lease is a write lease, the holder may have dirty cached data,
 * so report the current time instead of the stored mtime.
 * NOTE(review): i_flock is read here without lock_flocks(); looks racy
 * against concurrent lease changes -- confirm this is intentional.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/*
 * fcntl_getlease - F_GETLEASE: report the type of lease (F_RDLCK,
 * F_WRLCK or F_UNLCK) currently held on @filp via this struct file,
 * after first expiring any timed-out leases.  The F_INPROGRESS break
 * flag is masked off from the reported type.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_flocks();
	return type;
}
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
/*
 * generic_setlease - core of F_SETLEASE: set, change or release a lease
 * on @filp.  @arg is F_RDLCK, F_WRLCK or F_UNLCK.  On success a newly
 * inserted lease takes ownership of *flp; when an existing lease was
 * modified in place instead, *flp is redirected to it.  Caller must
 * hold the file-lock spinlock (see vfs_setlease()).
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	lease = *flp;

	/* Only the file's owner -- or CAP_LEASE -- may take a lease. */
	error = -EACCES;
	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		goto out;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto out;
	error = security_file_lock(filp, arg);
	if (error)
		goto out;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->fl_break);

	if (arg != F_UNLCK) {
		/*
		 * Refuse a lease while the file is open in a conflicting
		 * way: writers block read leases; any other reference
		 * blocks write leases.
		 */
		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((dentry->d_count > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * Walk the existing leases (they sort first on i_flock):
	 * remember our own lease's slot if present and count the
	 * others by type.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	/* Modify (or remove) our existing lease in place. */
	if (my_before != NULL) {
		error = lease->fl_lmops->fl_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	if (arg == F_UNLCK)
		goto out;	/* nothing held: unlock is a no-op */

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
EXPORT_SYMBOL(generic_setlease);
1430
1431static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1432{
1433 if (filp->f_op && filp->f_op->setlease)
1434 return filp->f_op->setlease(filp, arg, lease);
1435 else
1436 return generic_setlease(filp, arg, lease);
1437}
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
/*
 * vfs_setlease - locked entry point for setting or releasing a lease.
 * Takes the file-lock spinlock around __vfs_setlease(); on success
 * ownership of *lease passes to (or is redirected at) the inode's lock
 * list.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1477
/*
 * Remove the caller's lease on @filp (F_SETLEASE with F_UNLCK).  The
 * stack-allocated lock serves only as a request descriptor; lease_init
 * cannot fail for F_UNLCK, so its return value is not checked.
 */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1486
/*
 * Install a new lease on @filp (F_SETLEASE with F_RDLCK/F_WRLCK) and
 * arrange for SIGIO delivery to the caller when the lease is broken.
 * The fasync entry is pre-allocated outside the spinlock; the lease
 * itself may end up being an update of an existing one (ret != fl).
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* An existing lease was updated in place; our copy is unused. */
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used "new" and
	 * inserted it into the fasync list.  In that case "new"
	 * must not be freed below.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1542{
1543 if (arg == F_UNLCK)
1544 return do_fcntl_delete_lease(filp);
1545 return do_fcntl_add_lease(fd, filp, arg);
1546}
1547
1548
1549
1550
1551
1552
1553
1554
/*
 * flock_lock_file_wait - blocking variant of flock_lock_file().
 * Retries until the flock lock is granted, an error occurs, or the
 * sleep is interrupted (in which case the waiter is dequeued and the
 * signal error is returned).
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Queued: sleep until granted (fl_next cleared) or a signal. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
/*
 * sys_flock: the flock(2) system call.  Apply or remove an advisory
 * whole-file lock (LOCK_SH, LOCK_EX or LOCK_UN, optionally ORed with
 * LOCK_NB for non-blocking behaviour).  Filesystems may override the
 * default handling via the f_op->flock method (e.g. for NFS).
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* Taking a lock requires the file be open for reading or writing. */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648int vfs_test_lock(struct file *filp, struct file_lock *fl)
1649{
1650 if (filp->f_op && filp->f_op->lock)
1651 return filp->f_op->lock(filp, F_GETLK, fl);
1652 posix_test_lock(filp, fl);
1653 return 0;
1654}
1655EXPORT_SYMBOL_GPL(vfs_test_lock);
1656
/*
 * Convert an internal file_lock into a userspace struct flock.
 * On 32-bit kernels this fails with -EOVERFLOW when the range does not
 * fit in the legacy 32-bit off_t fields.  An fl_end of OFFSET_MAX is
 * reported as l_len == 0, meaning "to end of file".
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
1677
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of posix_lock_to_flock() for 32-bit kernels: the
 * struct flock64 fields are wide enough, so no overflow check needed.
 */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
1689
1690
1691
1692
/*
 * fcntl_getlk - the F_GETLK fcntl: report the first lock that would
 * block the request described by the userspace struct flock at @l, or
 * set l_type to F_UNLCK if none would.  The result is copied back to
 * userspace only on success.
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		/* May fail with -EOVERFLOW on 32-bit kernels. */
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1761{
1762 if (filp->f_op && filp->f_op->lock)
1763 return filp->f_op->lock(filp, cmd, fl);
1764 else
1765 return posix_lock_file(filp, fl, conf);
1766}
1767EXPORT_SYMBOL_GPL(vfs_lock_file);
1768
/* Apply a lock, and for blocking (FL_SLEEP) requests keep retrying until it
 * is granted or the wait is interrupted.  Returns 0 on success, or a
 * negative errno (e.g. -ERESTARTSYS if a signal interrupted the sleep).
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
		struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* the request was queued: sleep until fl is no longer on a
		 * blocked list (fl_next cleared means it was granted) */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* interrupted: take ourselves off the blocked list */
		locks_delete_block(fl);
		break;
	}

	return error;
}
1792
1793
1794
1795
1796int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1797 struct flock __user *l)
1798{
1799 struct file_lock *file_lock = locks_alloc_lock();
1800 struct flock flock;
1801 struct inode *inode;
1802 struct file *f;
1803 int error;
1804
1805 if (file_lock == NULL)
1806 return -ENOLCK;
1807
1808
1809
1810
1811 error = -EFAULT;
1812 if (copy_from_user(&flock, l, sizeof(flock)))
1813 goto out;
1814
1815 inode = filp->f_path.dentry->d_inode;
1816
1817
1818
1819
1820 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1821 error = -EAGAIN;
1822 goto out;
1823 }
1824
1825again:
1826 error = flock_to_posix_lock(filp, file_lock, &flock);
1827 if (error)
1828 goto out;
1829 if (cmd == F_SETLKW) {
1830 file_lock->fl_flags |= FL_SLEEP;
1831 }
1832
1833 error = -EBADF;
1834 switch (flock.l_type) {
1835 case F_RDLCK:
1836 if (!(filp->f_mode & FMODE_READ))
1837 goto out;
1838 break;
1839 case F_WRLCK:
1840 if (!(filp->f_mode & FMODE_WRITE))
1841 goto out;
1842 break;
1843 case F_UNLCK:
1844 break;
1845 default:
1846 error = -EINVAL;
1847 goto out;
1848 }
1849
1850 error = do_lock_file_wait(filp, cmd, file_lock);
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861 spin_lock(¤t->files->file_lock);
1862 f = fcheck(fd);
1863 spin_unlock(¤t->files->file_lock);
1864 if (!error && f != filp && flock.l_type != F_UNLCK) {
1865 flock.l_type = F_UNLCK;
1866 goto again;
1867 }
1868
1869out:
1870 locks_free_lock(file_lock);
1871 return error;
1872}
1873
1874#if BITS_PER_LONG == 32
1875
1876
1877
/* Report the first lock that would conflict with the 64-bit lock described
 * by l.  This implements the F_GETLK64 command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	/* F_GETLK64 may only probe for read or write locks */
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		/* no overflow check needed: flock64 offsets are 64 bits */
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}
1910
1911
1912
1913
1914int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1915 struct flock64 __user *l)
1916{
1917 struct file_lock *file_lock = locks_alloc_lock();
1918 struct flock64 flock;
1919 struct inode *inode;
1920 struct file *f;
1921 int error;
1922
1923 if (file_lock == NULL)
1924 return -ENOLCK;
1925
1926
1927
1928
1929 error = -EFAULT;
1930 if (copy_from_user(&flock, l, sizeof(flock)))
1931 goto out;
1932
1933 inode = filp->f_path.dentry->d_inode;
1934
1935
1936
1937
1938 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1939 error = -EAGAIN;
1940 goto out;
1941 }
1942
1943again:
1944 error = flock64_to_posix_lock(filp, file_lock, &flock);
1945 if (error)
1946 goto out;
1947 if (cmd == F_SETLKW64) {
1948 file_lock->fl_flags |= FL_SLEEP;
1949 }
1950
1951 error = -EBADF;
1952 switch (flock.l_type) {
1953 case F_RDLCK:
1954 if (!(filp->f_mode & FMODE_READ))
1955 goto out;
1956 break;
1957 case F_WRLCK:
1958 if (!(filp->f_mode & FMODE_WRITE))
1959 goto out;
1960 break;
1961 case F_UNLCK:
1962 break;
1963 default:
1964 error = -EINVAL;
1965 goto out;
1966 }
1967
1968 error = do_lock_file_wait(filp, cmd, file_lock);
1969
1970
1971
1972
1973
1974 spin_lock(¤t->files->file_lock);
1975 f = fcheck(fd);
1976 spin_unlock(¤t->files->file_lock);
1977 if (!error && f != filp && flock.l_type != F_UNLCK) {
1978 flock.l_type = F_UNLCK;
1979 goto again;
1980 }
1981
1982out:
1983 locks_free_lock(file_lock);
1984 return error;
1985}
1986#endif
1987
1988
1989
1990
1991
1992
1993void locks_remove_posix(struct file *filp, fl_owner_t owner)
1994{
1995 struct file_lock lock;
1996
1997
1998
1999
2000
2001
2002 if (!filp->f_path.dentry->d_inode->i_flock)
2003 return;
2004
2005 lock.fl_type = F_UNLCK;
2006 lock.fl_flags = FL_POSIX | FL_CLOSE;
2007 lock.fl_start = 0;
2008 lock.fl_end = OFFSET_MAX;
2009 lock.fl_owner = owner;
2010 lock.fl_pid = current->tgid;
2011 lock.fl_file = filp;
2012 lock.fl_ops = NULL;
2013 lock.fl_lmops = NULL;
2014
2015 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2016
2017 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2018 lock.fl_ops->fl_release_private(&lock);
2019}
2020
2021EXPORT_SYMBOL(locks_remove_posix);
2022
2023
2024
2025
/*
 * This function is called on the last close of an open file: remove any
 * remaining flock locks and leases owned by this struct file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	/* let a filesystem with its own ->flock method release first */
	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	/* walk the i_flock list, unlinking entries that belong to filp;
	 * note: after a delete we re-test *before without advancing */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* a POSIX lock here would be a bug: those are removed
			 * earlier by locks_remove_posix() */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}
2068
2069
2070
2071
2072
2073
2074
2075
2076int
2077posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2078{
2079 int status = 0;
2080
2081 lock_flocks();
2082 if (waiter->fl_next)
2083 __locks_delete_block(waiter);
2084 else
2085 status = -ENOENT;
2086 unlock_flocks();
2087 return status;
2088}
2089
2090EXPORT_SYMBOL(posix_unblock_lock);
2091
2092
2093
2094
2095
2096
2097
2098
2099int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2100{
2101 if (filp->f_op && filp->f_op->lock)
2102 return filp->f_op->lock(filp, F_CANCELLK, fl);
2103 return 0;
2104}
2105
2106EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2107
2108#ifdef CONFIG_PROC_FS
2109#include <linux/proc_fs.h>
2110#include <linux/seq_file.h>
2111
/* Format one lock as a line of /proc/locks output.
 * @id:  the ordinal shown in the first column
 * @pfx: "" for a held lock, " ->" for a waiter blocked on it
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* report the pid as seen from the owner's pid namespace if known */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK MSNFS ");
		} else {
			seq_printf(f, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE ");
		if (fl->fl_type & F_INPROGRESS)
			seq_printf(f, "BREAKING ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE ");
		else
			seq_printf(f, "BREAKER ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (fl->fl_type & F_INPROGRESS)
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace (lslk) relies on MAJOR:MINOR device numbers here */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2182
2183static int locks_show(struct seq_file *f, void *v)
2184{
2185 struct file_lock *fl, *bfl;
2186
2187 fl = list_entry(v, struct file_lock, fl_link);
2188
2189 lock_get_status(f, fl, *((loff_t *)f->private), "");
2190
2191 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2192 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2193
2194 return 0;
2195}
2196
2197static void *locks_start(struct seq_file *f, loff_t *pos)
2198{
2199 loff_t *p = f->private;
2200
2201 lock_flocks();
2202 *p = (*pos + 1);
2203 return seq_list_start(&file_lock_list, *pos);
2204}
2205
2206static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2207{
2208 loff_t *p = f->private;
2209 ++*p;
2210 return seq_list_next(v, &file_lock_list, pos);
2211}
2212
/* seq_file ->stop: drop the global lock taken in locks_start() */
static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}
2217
/* seq_file iterator over the global file_lock_list for /proc/locks */
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2224
/* open /proc/locks: the private loff_t holds the per-entry ordinal
 * shared between locks_start/locks_next and locks_show */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
2229
/* file operations backing the /proc/locks entry */
static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2236
/* register /proc/locks; return value of proc_create is deliberately
 * ignored — the rest of the locking code works without the proc entry */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
2242module_init(proc_locks_init);
2243#endif
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2259{
2260 struct file_lock *fl;
2261 int result = 1;
2262 lock_flocks();
2263 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2264 if (IS_POSIX(fl)) {
2265 if (fl->fl_type == F_RDLCK)
2266 continue;
2267 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2268 continue;
2269 } else if (IS_FLOCK(fl)) {
2270 if (!(fl->fl_type & LOCK_MAND))
2271 continue;
2272 if (fl->fl_type & LOCK_READ)
2273 continue;
2274 } else
2275 continue;
2276 result = 0;
2277 break;
2278 }
2279 unlock_flocks();
2280 return result;
2281}
2282
2283EXPORT_SYMBOL(lock_may_read);
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2299{
2300 struct file_lock *fl;
2301 int result = 1;
2302 lock_flocks();
2303 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2304 if (IS_POSIX(fl)) {
2305 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2306 continue;
2307 } else if (IS_FLOCK(fl)) {
2308 if (!(fl->fl_type & LOCK_MAND))
2309 continue;
2310 if (fl->fl_type & LOCK_WRITE)
2311 continue;
2312 } else
2313 continue;
2314 result = 0;
2315 break;
2316 }
2317 unlock_flocks();
2318 return result;
2319}
2320
2321EXPORT_SYMBOL(lock_may_write);
2322
/* Create the slab cache backing locks_alloc_lock().  SLAB_PANIC makes
 * the kernel panic at boot if the cache cannot be created, so no error
 * handling is needed here.  init_once is the constructor defined earlier
 * in this file. */
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC,
			init_once);
	return 0;
}
2330
2331core_initcall(filelock_init);
2332