#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static struct kmem_cache *filelock_cache __read_mostly;
147
148
149static struct file_lock *locks_alloc_lock(void)
150{
151 return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
152}
153
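/*
 * Release the filesystem/lock-manager private state attached to a lock:
 * call the respective ->fl_release_private() hook, if any, then clear
 * fl_ops and fl_lmops.
 */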
154void locks_release_private(struct file_lock *fl)
155{
156 if (fl->fl_ops) {
157 if (fl->fl_ops->fl_release_private)
158 fl->fl_ops->fl_release_private(fl);
159 fl->fl_ops = NULL;
160 }
161 if (fl->fl_lmops) {
162 if (fl->fl_lmops->fl_release_private)
163 fl->fl_lmops->fl_release_private(fl);
164 fl->fl_lmops = NULL;
165 }
166
167}
168EXPORT_SYMBOL_GPL(locks_release_private);
169
170
171static void locks_free_lock(struct file_lock *fl)
172{
173 BUG_ON(waitqueue_active(&fl->fl_wait));
174 BUG_ON(!list_empty(&fl->fl_block));
175 BUG_ON(!list_empty(&fl->fl_link));
176
177 locks_release_private(fl);
178 kmem_cache_free(filelock_cache, fl);
179}
180
181void locks_init_lock(struct file_lock *fl)
182{
183 INIT_LIST_HEAD(&fl->fl_link);
184 INIT_LIST_HEAD(&fl->fl_block);
185 init_waitqueue_head(&fl->fl_wait);
186 fl->fl_next = NULL;
187 fl->fl_fasync = NULL;
188 fl->fl_owner = NULL;
189 fl->fl_pid = 0;
190 fl->fl_nspid = NULL;
191 fl->fl_file = NULL;
192 fl->fl_flags = 0;
193 fl->fl_type = 0;
194 fl->fl_start = fl->fl_end = 0;
195 fl->fl_ops = NULL;
196 fl->fl_lmops = NULL;
197}
198
199EXPORT_SYMBOL(locks_init_lock);
200
201
202
203
204
205static void init_once(void *foo)
206{
207 struct file_lock *lock = (struct file_lock *) foo;
208
209 locks_init_lock(lock);
210}
211
212static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
213{
214 if (fl->fl_ops) {
215 if (fl->fl_ops->fl_copy_lock)
216 fl->fl_ops->fl_copy_lock(new, fl);
217 new->fl_ops = fl->fl_ops;
218 }
219 if (fl->fl_lmops) {
220 if (fl->fl_lmops->fl_copy_lock)
221 fl->fl_lmops->fl_copy_lock(new, fl);
222 new->fl_lmops = fl->fl_lmops;
223 }
224}
225
226
227
228
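/*
 * Copy the lock description (owner, pid, flags, type and byte range)
 * from @fl into @new, deliberately leaving the file, fl_ops and
 * fl_lmops pointers unset.  Used e.g. when reporting a conflicting
 * lock back to a caller.
 */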
229void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
230{
231 new->fl_owner = fl->fl_owner;
232 new->fl_pid = fl->fl_pid;
233 new->fl_file = NULL;
234 new->fl_flags = fl->fl_flags;
235 new->fl_type = fl->fl_type;
236 new->fl_start = fl->fl_start;
237 new->fl_end = fl->fl_end;
238 new->fl_ops = NULL;
239 new->fl_lmops = NULL;
240}
241EXPORT_SYMBOL(__locks_copy_lock);
242
243void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
244{
245 locks_release_private(new);
246
247 __locks_copy_lock(new, fl);
248 new->fl_file = fl->fl_file;
249 new->fl_ops = fl->fl_ops;
250 new->fl_lmops = fl->fl_lmops;
251
252 locks_copy_private(new, fl);
253}
254
255EXPORT_SYMBOL(locks_copy_lock);
256
257static inline int flock_translate_cmd(int cmd) {
258 if (cmd & LOCK_MAND)
259 return cmd & (LOCK_MAND | LOCK_RW);
260 switch (cmd) {
261 case LOCK_SH:
262 return F_RDLCK;
263 case LOCK_EX:
264 return F_WRLCK;
265 case LOCK_UN:
266 return F_UNLCK;
267 }
268 return -EINVAL;
269}
270
271
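/*
 * Allocate and fill in a whole-file FL_FLOCK lock from a flock(2)
 * command.  Returns 0 with the new lock stored in *lock, or a negative
 * errno if the command is invalid or allocation fails.
 */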
272static int flock_make_lock(struct file *filp, struct file_lock **lock,
273 unsigned int cmd)
274{
275 struct file_lock *fl;
276 int type = flock_translate_cmd(cmd);
277 if (type < 0)
278 return type;
279
280 fl = locks_alloc_lock();
281 if (fl == NULL)
282 return -ENOMEM;
283
284 fl->fl_file = filp;
285 fl->fl_pid = current->tgid;
286 fl->fl_flags = FL_FLOCK;
287 fl->fl_type = type;
288 fl->fl_end = OFFSET_MAX;
289
290 *lock = fl;
291 return 0;
292}
293
294static int assign_type(struct file_lock *fl, int type)
295{
296 switch (type) {
297 case F_RDLCK:
298 case F_WRLCK:
299 case F_UNLCK:
300 fl->fl_type = type;
301 break;
302 default:
303 return -EINVAL;
304 }
305 return 0;
306}
307
308
309
310
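/*
 * Translate a userspace struct flock into an FL_POSIX file_lock:
 * resolve l_whence against the current file position or file size and
 * convert (l_start, l_len) into an inclusive [fl_start, fl_end] range,
 * where l_len == 0 means "to end of file" (fl_end = OFFSET_MAX).
 */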
311static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
312 struct flock *l)
313{
314 off_t start, end;
315
316 switch (l->l_whence) {
317 case SEEK_SET:
318 start = 0;
319 break;
320 case SEEK_CUR:
321 start = filp->f_pos;
322 break;
323 case SEEK_END:
324 start = i_size_read(filp->f_path.dentry->d_inode);
325 break;
326 default:
327 return -EINVAL;
328 }
329
330
331
332 start += l->l_start;
333 if (start < 0)
334 return -EINVAL;
335 fl->fl_end = OFFSET_MAX;
336 if (l->l_len > 0) {
337 end = start + l->l_len - 1;
338 fl->fl_end = end;
339 } else if (l->l_len < 0) {
340 end = start - 1;
341 fl->fl_end = end;
342 start += l->l_len;
343 if (start < 0)
344 return -EINVAL;
345 }
346 fl->fl_start = start;
347 if (fl->fl_end < fl->fl_start)
348 return -EOVERFLOW;
349
350 fl->fl_owner = current->files;
351 fl->fl_pid = current->tgid;
352 fl->fl_file = filp;
353 fl->fl_flags = FL_POSIX;
354 fl->fl_ops = NULL;
355 fl->fl_lmops = NULL;
356
357 return assign_type(fl, l->l_type);
358}
359
360#if BITS_PER_LONG == 32
361static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
362 struct flock64 *l)
363{
364 loff_t start;
365
366 switch (l->l_whence) {
367 case SEEK_SET:
368 start = 0;
369 break;
370 case SEEK_CUR:
371 start = filp->f_pos;
372 break;
373 case SEEK_END:
374 start = i_size_read(filp->f_path.dentry->d_inode);
375 break;
376 default:
377 return -EINVAL;
378 }
379
380 start += l->l_start;
381 if (start < 0)
382 return -EINVAL;
383 fl->fl_end = OFFSET_MAX;
384 if (l->l_len > 0) {
385 fl->fl_end = start + l->l_len - 1;
386 } else if (l->l_len < 0) {
387 fl->fl_end = start - 1;
388 start += l->l_len;
389 if (start < 0)
390 return -EINVAL;
391 }
392 fl->fl_start = start;
393 if (fl->fl_end < fl->fl_start)
394 return -EOVERFLOW;
395
396 fl->fl_owner = current->files;
397 fl->fl_pid = current->tgid;
398 fl->fl_file = filp;
399 fl->fl_flags = FL_POSIX;
400 fl->fl_ops = NULL;
401 fl->fl_lmops = NULL;
402
403 switch (l->l_type) {
404 case F_RDLCK:
405 case F_WRLCK:
406 case F_UNLCK:
407 fl->fl_type = l->l_type;
408 break;
409 default:
410 return -EINVAL;
411 }
412
413 return (0);
414}
415#endif
416
417
418static void lease_break_callback(struct file_lock *fl)
419{
420 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
421}
422
423static void lease_release_private_callback(struct file_lock *fl)
424{
425 if (!fl->fl_file)
426 return;
427
428 f_delown(fl->fl_file);
429 fl->fl_file->f_owner.signum = 0;
430}
431
432static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
433{
434 return fl->fl_file == try->fl_file;
435}
436
437static const struct lock_manager_operations lease_manager_ops = {
438 .fl_break = lease_break_callback,
439 .fl_release_private = lease_release_private_callback,
440 .fl_mylease = lease_mylease_callback,
441 .fl_change = lease_modify,
442};
443
444
445
446
447static int lease_init(struct file *filp, int type, struct file_lock *fl)
448 {
449 if (assign_type(fl, type) != 0)
450 return -EINVAL;
451
452 fl->fl_owner = current->files;
453 fl->fl_pid = current->tgid;
454
455 fl->fl_file = filp;
456 fl->fl_flags = FL_LEASE;
457 fl->fl_start = 0;
458 fl->fl_end = OFFSET_MAX;
459 fl->fl_ops = NULL;
460 fl->fl_lmops = &lease_manager_ops;
461 return 0;
462}
463
464
465static struct file_lock *lease_alloc(struct file *filp, int type)
466{
467 struct file_lock *fl = locks_alloc_lock();
468 int error = -ENOMEM;
469
470 if (fl == NULL)
471 return ERR_PTR(error);
472
473 error = lease_init(filp, type, fl);
474 if (error) {
475 locks_free_lock(fl);
476 return ERR_PTR(error);
477 }
478 return fl;
479}
480
481
482
483static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
484{
485 return ((fl1->fl_end >= fl2->fl_start) &&
486 (fl2->fl_end >= fl1->fl_start));
487}
488
489
490
491
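/*
 * Two POSIX locks belong to the same owner if their lock manager says
 * so via ->fl_compare_owner(), or, by default, if they share fl_owner
 * (for local locks, the task's files_struct).
 */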
492static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
493{
494 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
495 return fl2->fl_lmops == fl1->fl_lmops &&
496 fl1->fl_lmops->fl_compare_owner(fl1, fl2);
497 return fl1->fl_owner == fl2->fl_owner;
498}
499
500
501
502
503static void __locks_delete_block(struct file_lock *waiter)
504{
505 list_del_init(&waiter->fl_block);
506 list_del_init(&waiter->fl_link);
507 waiter->fl_next = NULL;
508}
509
510
511
512static void locks_delete_block(struct file_lock *waiter)
513{
514 lock_kernel();
515 __locks_delete_block(waiter);
516 unlock_kernel();
517}
518
519
520
521
522
523
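/*
 * Queue @waiter on @blocker's block list.  POSIX waiters also go on the
 * global blocked_list so that deadlock detection can follow the chain
 * of owners waiting on each other.
 */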
524static void locks_insert_block(struct file_lock *blocker,
525 struct file_lock *waiter)
526{
527 BUG_ON(!list_empty(&waiter->fl_block));
528 list_add_tail(&waiter->fl_block, &blocker->fl_block);
529 waiter->fl_next = blocker;
530 if (IS_POSIX(blocker))
531 list_add(&waiter->fl_link, &blocked_list);
532}
533
534
535
536
537
538static void locks_wake_up_blocks(struct file_lock *blocker)
539{
540 while (!list_empty(&blocker->fl_block)) {
541 struct file_lock *waiter;
542
543 waiter = list_first_entry(&blocker->fl_block,
544 struct file_lock, fl_block);
545 __locks_delete_block(waiter);
546 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
547 waiter->fl_lmops->fl_notify(waiter);
548 else
549 wake_up(&waiter->fl_wait);
550 }
551}
552
553
554
555
556static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
557{
558 list_add(&fl->fl_link, &file_lock_list);
559
560 fl->fl_nspid = get_pid(task_tgid(current));
561
562
563 fl->fl_next = *pos;
564 *pos = fl;
565}
566
567
568
569
570
571
572
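/*
 * Unlink a lock from the inode's i_flock chain, drop its fasync and pid
 * references, wake up any waiters blocked on it, and free it.
 */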
573static void locks_delete_lock(struct file_lock **thisfl_p)
574{
575 struct file_lock *fl = *thisfl_p;
576
577 *thisfl_p = fl->fl_next;
578 fl->fl_next = NULL;
579 list_del_init(&fl->fl_link);
580
581 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
582 if (fl->fl_fasync != NULL) {
583 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
584 fl->fl_fasync = NULL;
585 }
586
587 if (fl->fl_nspid) {
588 put_pid(fl->fl_nspid);
589 fl->fl_nspid = NULL;
590 }
591
592 locks_wake_up_blocks(fl);
593 locks_free_lock(fl);
594}
595
596
597
598
599static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
600{
601 if (sys_fl->fl_type == F_WRLCK)
602 return 1;
603 if (caller_fl->fl_type == F_WRLCK)
604 return 1;
605 return 0;
606}
607
608
609
610
611static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
612{
613
614
615
616 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
617 return (0);
618
619
620 if (!locks_overlap(caller_fl, sys_fl))
621 return 0;
622
623 return (locks_conflict(caller_fl, sys_fl));
624}
625
626
627
628
629static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
630{
631
632
633
634 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
635 return (0);
636 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
637 return 0;
638
639 return (locks_conflict(caller_fl, sys_fl));
640}
641
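/*
 * Look for a POSIX lock that would conflict with @fl.  If one is found
 * its description is copied into @fl (with the pid translated into the
 * caller's namespace); otherwise fl->fl_type is set to F_UNLCK.
 */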
642void
643posix_test_lock(struct file *filp, struct file_lock *fl)
644{
645 struct file_lock *cfl;
646
647 lock_kernel();
648 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
649 if (!IS_POSIX(cfl))
650 continue;
651 if (posix_locks_conflict(fl, cfl))
652 break;
653 }
654 if (cfl) {
655 __locks_copy_lock(fl, cfl);
656 if (cfl->fl_nspid)
657 fl->fl_pid = pid_vnr(cfl->fl_nspid);
658 } else
659 fl->fl_type = F_UNLCK;
660 unlock_kernel();
661 return;
662}
663EXPORT_SYMBOL(posix_test_lock);
689
690#define MAX_DEADLK_ITERATIONS 10
691
692
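/*
 * Deadlock detection for blocking POSIX lock requests: follow the chain
 * of owners that each blocked lock is waiting on.  If the chain leads
 * back to the requesting owner the request would deadlock.  The walk
 * gives up after MAX_DEADLK_ITERATIONS steps instead of looping forever
 * on cycles it cannot attribute to the caller.
 */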
693static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
694{
695 struct file_lock *fl;
696
697 list_for_each_entry(fl, &blocked_list, fl_link) {
698 if (posix_same_owner(fl, block_fl))
699 return fl->fl_next;
700 }
701 return NULL;
702}
703
704static int posix_locks_deadlock(struct file_lock *caller_fl,
705 struct file_lock *block_fl)
706{
707 int i = 0;
708
709 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
710 if (i++ > MAX_DEADLK_ITERATIONS)
711 return 0;
712 if (posix_same_owner(caller_fl, block_fl))
713 return 1;
714 }
715 return 0;
716}
724
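/*
 * Apply an FL_FLOCK lock to a file.  A struct file holds at most one
 * flock lock, so an existing lock of a different type is removed before
 * the new one is inserted; a conflict with a lock held through another
 * struct file either fails with -EAGAIN or blocks (FL_SLEEP).
 */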
725static int flock_lock_file(struct file *filp, struct file_lock *request)
726{
727 struct file_lock *new_fl = NULL;
728 struct file_lock **before;
729 struct inode * inode = filp->f_path.dentry->d_inode;
730 int error = 0;
731 int found = 0;
732
733 lock_kernel();
734 if (request->fl_flags & FL_ACCESS)
735 goto find_conflict;
736
737 if (request->fl_type != F_UNLCK) {
738 error = -ENOMEM;
739 new_fl = locks_alloc_lock();
740 if (new_fl == NULL)
741 goto out;
742 error = 0;
743 }
744
745 for_each_lock(inode, before) {
746 struct file_lock *fl = *before;
747 if (IS_POSIX(fl))
748 break;
749 if (IS_LEASE(fl))
750 continue;
751 if (filp != fl->fl_file)
752 continue;
753 if (request->fl_type == fl->fl_type)
754 goto out;
755 found = 1;
756 locks_delete_lock(before);
757 break;
758 }
759
760 if (request->fl_type == F_UNLCK) {
761 if ((request->fl_flags & FL_EXISTS) && !found)
762 error = -ENOENT;
763 goto out;
764 }
765
766
767
768
769
770 if (found)
771 cond_resched();
772
773find_conflict:
774 for_each_lock(inode, before) {
775 struct file_lock *fl = *before;
776 if (IS_POSIX(fl))
777 break;
778 if (IS_LEASE(fl))
779 continue;
780 if (!flock_locks_conflict(request, fl))
781 continue;
782 error = -EAGAIN;
783 if (!(request->fl_flags & FL_SLEEP))
784 goto out;
785 error = FILE_LOCK_DEFERRED;
786 locks_insert_block(fl, request);
787 goto out;
788 }
789 if (request->fl_flags & FL_ACCESS)
790 goto out;
791 locks_copy_lock(new_fl, request);
792 locks_insert_lock(before, new_fl);
793 new_fl = NULL;
794 error = 0;
795
796out:
797 unlock_kernel();
798 if (new_fl)
799 locks_free_lock(new_fl);
800 return error;
801}
802
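/*
 * Core of POSIX (fcntl) locking.  Scan the inode's lock list for a
 * conflicting lock and, on conflict, return -EAGAIN, queue the request
 * (FILE_LOCK_DEFERRED), or fail with -EDEADLK.  Otherwise merge the
 * request with adjacent or overlapping locks of the same owner,
 * splitting an existing lock in two when the new range punches a hole
 * in the middle (hence the two locks preallocated up front).  If
 * @conflock is non-NULL, any conflicting lock found is copied into it.
 */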
803static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
804{
805 struct file_lock *fl;
806 struct file_lock *new_fl = NULL;
807 struct file_lock *new_fl2 = NULL;
808 struct file_lock *left = NULL;
809 struct file_lock *right = NULL;
810 struct file_lock **before;
811 int error, added = 0;
812
813
814
815
816
817
818
819 if (!(request->fl_flags & FL_ACCESS) &&
820 (request->fl_type != F_UNLCK ||
821 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
822 new_fl = locks_alloc_lock();
823 new_fl2 = locks_alloc_lock();
824 }
825
826 lock_kernel();
827 if (request->fl_type != F_UNLCK) {
828 for_each_lock(inode, before) {
829 fl = *before;
830 if (!IS_POSIX(fl))
831 continue;
832 if (!posix_locks_conflict(request, fl))
833 continue;
834 if (conflock)
835 __locks_copy_lock(conflock, fl);
836 error = -EAGAIN;
837 if (!(request->fl_flags & FL_SLEEP))
838 goto out;
839 error = -EDEADLK;
840 if (posix_locks_deadlock(request, fl))
841 goto out;
842 error = FILE_LOCK_DEFERRED;
843 locks_insert_block(fl, request);
844 goto out;
845 }
846 }
847
848
849 error = 0;
850 if (request->fl_flags & FL_ACCESS)
851 goto out;
852
853
854
855
856
857 before = &inode->i_flock;
858
859
860 while ((fl = *before) && (!IS_POSIX(fl) ||
861 !posix_same_owner(request, fl))) {
862 before = &fl->fl_next;
863 }
864
865
866 while ((fl = *before) && posix_same_owner(request, fl)) {
867
868
869 if (request->fl_type == fl->fl_type) {
870
871
872
873
874 if (fl->fl_end < request->fl_start - 1)
875 goto next_lock;
876
877
878
879 if (fl->fl_start - 1 > request->fl_end)
880 break;
881
882
883
884
885
886
887 if (fl->fl_start > request->fl_start)
888 fl->fl_start = request->fl_start;
889 else
890 request->fl_start = fl->fl_start;
891 if (fl->fl_end < request->fl_end)
892 fl->fl_end = request->fl_end;
893 else
894 request->fl_end = fl->fl_end;
895 if (added) {
896 locks_delete_lock(before);
897 continue;
898 }
899 request = fl;
900 added = 1;
901 }
902 else {
903
904
905
906 if (fl->fl_end < request->fl_start)
907 goto next_lock;
908 if (fl->fl_start > request->fl_end)
909 break;
910 if (request->fl_type == F_UNLCK)
911 added = 1;
912 if (fl->fl_start < request->fl_start)
913 left = fl;
914
915
916
917 if (fl->fl_end > request->fl_end) {
918 right = fl;
919 break;
920 }
921 if (fl->fl_start >= request->fl_start) {
922
923
924
925 if (added) {
926 locks_delete_lock(before);
927 continue;
928 }
929
930
931
932
933
934 locks_wake_up_blocks(fl);
935 fl->fl_start = request->fl_start;
936 fl->fl_end = request->fl_end;
937 fl->fl_type = request->fl_type;
938 locks_release_private(fl);
939 locks_copy_private(fl, request);
940 request = fl;
941 added = 1;
942 }
943 }
944
945
946 next_lock:
947 before = &fl->fl_next;
948 }
949
950
951
952
953
954
955
956 error = -ENOLCK;
957 if (right && left == right && !new_fl2)
958 goto out;
959
960 error = 0;
961 if (!added) {
962 if (request->fl_type == F_UNLCK) {
963 if (request->fl_flags & FL_EXISTS)
964 error = -ENOENT;
965 goto out;
966 }
967
968 if (!new_fl) {
969 error = -ENOLCK;
970 goto out;
971 }
972 locks_copy_lock(new_fl, request);
973 locks_insert_lock(before, new_fl);
974 new_fl = NULL;
975 }
976 if (right) {
977 if (left == right) {
978
979
980
981 left = new_fl2;
982 new_fl2 = NULL;
983 locks_copy_lock(left, right);
984 locks_insert_lock(before, left);
985 }
986 right->fl_start = request->fl_end + 1;
987 locks_wake_up_blocks(right);
988 }
989 if (left) {
990 left->fl_end = request->fl_start - 1;
991 locks_wake_up_blocks(left);
992 }
993 out:
994 unlock_kernel();
995
996
997
998 if (new_fl)
999 locks_free_lock(new_fl);
1000 if (new_fl2)
1001 locks_free_lock(new_fl2);
1002 return error;
1003}
1018
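/*
 * posix_lock_file - apply a POSIX-style lock to a file
 * @filp: file to apply the lock to
 * @fl: lock to be applied
 * @conflock: where to report a conflicting lock, may be NULL
 *
 * Does not wait for a conflicting lock to be released; callers that
 * want to block should retry while the return value is
 * FILE_LOCK_DEFERRED (see posix_lock_file_wait()).
 */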
1019int posix_lock_file(struct file *filp, struct file_lock *fl,
1020 struct file_lock *conflock)
1021{
1022 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1023}
1024EXPORT_SYMBOL(posix_lock_file);
1034
1035int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1036{
1037 int error;
1038 might_sleep ();
1039 for (;;) {
1040 error = posix_lock_file(filp, fl, NULL);
1041 if (error != FILE_LOCK_DEFERRED)
1042 break;
1043 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1044 if (!error)
1045 continue;
1046
1047 locks_delete_block(fl);
1048 break;
1049 }
1050 return error;
1051}
1052EXPORT_SYMBOL(posix_lock_file_wait);
1060
1061int locks_mandatory_locked(struct inode *inode)
1062{
1063 fl_owner_t owner = current->files;
1064 struct file_lock *fl;
1065
1066
1067
1068
1069 lock_kernel();
1070 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1071 if (!IS_POSIX(fl))
1072 continue;
1073 if (fl->fl_owner != owner)
1074 break;
1075 }
1076 unlock_kernel();
1077 return fl ? -EAGAIN : 0;
1078}
1092
1093int locks_mandatory_area(int read_write, struct inode *inode,
1094 struct file *filp, loff_t offset,
1095 size_t count)
1096{
1097 struct file_lock fl;
1098 int error;
1099
1100 locks_init_lock(&fl);
1101 fl.fl_owner = current->files;
1102 fl.fl_pid = current->tgid;
1103 fl.fl_file = filp;
1104 fl.fl_flags = FL_POSIX | FL_ACCESS;
1105 if (filp && !(filp->f_flags & O_NONBLOCK))
1106 fl.fl_flags |= FL_SLEEP;
1107 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1108 fl.fl_start = offset;
1109 fl.fl_end = offset + count - 1;
1110
1111 for (;;) {
1112 error = __posix_lock_file(inode, &fl, NULL);
1113 if (error != FILE_LOCK_DEFERRED)
1114 break;
1115 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1116 if (!error) {
1117
1118
1119
1120
1121 if (__mandatory_lock(inode))
1122 continue;
1123 }
1124
1125 locks_delete_block(&fl);
1126 break;
1127 }
1128
1129 return error;
1130}
1131
1132EXPORT_SYMBOL(locks_mandatory_area);
1133
1134
1135int lease_modify(struct file_lock **before, int arg)
1136{
1137 struct file_lock *fl = *before;
1138 int error = assign_type(fl, arg);
1139
1140 if (error)
1141 return error;
1142 locks_wake_up_blocks(fl);
1143 if (arg == F_UNLCK)
1144 locks_delete_lock(before);
1145 return 0;
1146}
1147
1148EXPORT_SYMBOL(lease_modify);
1149
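/*
 * Walk the leases at the head of the inode's lock list and finish
 * breaking any whose grace period (fl_break_time) has expired.
 */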
1150static void time_out_leases(struct inode *inode)
1151{
1152 struct file_lock **before;
1153 struct file_lock *fl;
1154
1155 before = &inode->i_flock;
1156 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1157 if ((fl->fl_break_time == 0)
1158 || time_before(jiffies, fl->fl_break_time)) {
1159 before = &fl->fl_next;
1160 continue;
1161 }
1162 lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1163 if (fl == *before)
1164 before = &fl->fl_next;
1165 }
1166}
1177
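/*
 * __break_lease - revoke all outstanding leases on an inode
 *
 * Called before an open or truncate that would conflict with existing
 * leases.  Lease holders are notified via ->fl_break() and given
 * lease_break_time seconds to give up the lease; unless O_NONBLOCK is
 * set in @mode the caller sleeps until the leases are gone or the
 * timeout expires.
 */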
1178int __break_lease(struct inode *inode, unsigned int mode)
1179{
1180 int error = 0, future;
1181 struct file_lock *new_fl, *flock;
1182 struct file_lock *fl;
1183 unsigned long break_time;
1184 int i_have_this_lease = 0;
1185
1186 new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK);
1187
1188 lock_kernel();
1189
1190 time_out_leases(inode);
1191
1192 flock = inode->i_flock;
1193 if ((flock == NULL) || !IS_LEASE(flock))
1194 goto out;
1195
1196 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1197 if (fl->fl_owner == current->files)
1198 i_have_this_lease = 1;
1199
1200 if (mode & FMODE_WRITE) {
1201
1202 future = F_UNLCK | F_INPROGRESS;
1203 } else if (flock->fl_type & F_INPROGRESS) {
1204
1205 future = flock->fl_type;
1206 } else if (flock->fl_type & F_WRLCK) {
1207
1208 future = F_RDLCK | F_INPROGRESS;
1209 } else {
1210
1211 goto out;
1212 }
1213
1214 if (IS_ERR(new_fl) && !i_have_this_lease
1215 && ((mode & O_NONBLOCK) == 0)) {
1216 error = PTR_ERR(new_fl);
1217 goto out;
1218 }
1219
1220 break_time = 0;
1221 if (lease_break_time > 0) {
1222 break_time = jiffies + lease_break_time * HZ;
1223 if (break_time == 0)
1224 break_time++;
1225 }
1226
1227 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1228 if (fl->fl_type != future) {
1229 fl->fl_type = future;
1230 fl->fl_break_time = break_time;
1231
1232 fl->fl_lmops->fl_break(fl);
1233 }
1234 }
1235
1236 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1237 error = -EWOULDBLOCK;
1238 goto out;
1239 }
1240
1241restart:
1242 break_time = flock->fl_break_time;
1243 if (break_time != 0) {
1244 break_time -= jiffies;
1245 if (break_time == 0)
1246 break_time++;
1247 }
1248 locks_insert_block(flock, new_fl);
1249 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1250 !new_fl->fl_next, break_time);
1251 __locks_delete_block(new_fl);
1252 if (error >= 0) {
1253 if (error == 0)
1254 time_out_leases(inode);
1255
1256 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1257 flock = flock->fl_next) {
1258 if (flock->fl_type & F_INPROGRESS)
1259 goto restart;
1260 }
1261 error = 0;
1262 }
1263
1264out:
1265 unlock_kernel();
1266 if (!IS_ERR(new_fl))
1267 locks_free_lock(new_fl);
1268 return error;
1269}
1270
1271EXPORT_SYMBOL(__break_lease);
1281
1282void lease_get_mtime(struct inode *inode, struct timespec *time)
1283{
1284 struct file_lock *flock = inode->i_flock;
1285 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1286 *time = current_fs_time(inode->i_sb);
1287 else
1288 *time = inode->i_mtime;
1289}
1290
1291EXPORT_SYMBOL(lease_get_mtime);
1315
1316int fcntl_getlease(struct file *filp)
1317{
1318 struct file_lock *fl;
1319 int type = F_UNLCK;
1320
1321 lock_kernel();
1322 time_out_leases(filp->f_path.dentry->d_inode);
1323 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1324 fl = fl->fl_next) {
1325 if (fl->fl_file == filp) {
1326 type = fl->fl_type & ~F_INPROGRESS;
1327 break;
1328 }
1329 }
1330 unlock_kernel();
1331 return type;
1332}
1344
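/*
 * generic_setlease - default implementation of F_SETLEASE
 *
 * Set, modify or remove the caller's lease on a file.  A read lease is
 * refused while the file is open for writing, a write lease while the
 * dentry or inode has other users, and any new lease while a
 * conflicting lease exists or leases_enable is off.
 */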
1345int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1346{
1347 struct file_lock *fl, **before, **my_before = NULL, *lease;
1348 struct file_lock *new_fl = NULL;
1349 struct dentry *dentry = filp->f_path.dentry;
1350 struct inode *inode = dentry->d_inode;
1351 int error, rdlease_count = 0, wrlease_count = 0;
1352
1353 if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1354 return -EACCES;
1355 if (!S_ISREG(inode->i_mode))
1356 return -EINVAL;
1357 error = security_file_lock(filp, arg);
1358 if (error)
1359 return error;
1360
1361 time_out_leases(inode);
1362
1363 BUG_ON(!(*flp)->fl_lmops->fl_break);
1364
1365 lease = *flp;
1366
1367 if (arg != F_UNLCK) {
1368 error = -ENOMEM;
1369 new_fl = locks_alloc_lock();
1370 if (new_fl == NULL)
1371 goto out;
1372
1373 error = -EAGAIN;
1374 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1375 goto out;
1376 if ((arg == F_WRLCK)
1377 && ((atomic_read(&dentry->d_count) > 1)
1378 || (atomic_read(&inode->i_count) > 1)))
1379 goto out;
1380 }
1389
1390 for (before = &inode->i_flock;
1391 ((fl = *before) != NULL) && IS_LEASE(fl);
1392 before = &fl->fl_next) {
1393 if (lease->fl_lmops->fl_mylease(fl, lease))
1394 my_before = before;
1395 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1396
1397
1398
1399
1400
1401 wrlease_count++;
1402 else
1403 rdlease_count++;
1404 }
1405
1406 error = -EAGAIN;
1407 if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1408 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1409 goto out;
1410
1411 if (my_before != NULL) {
1412 *flp = *my_before;
1413 error = lease->fl_lmops->fl_change(my_before, arg);
1414 goto out;
1415 }
1416
1417 error = 0;
1418 if (arg == F_UNLCK)
1419 goto out;
1420
1421 error = -EINVAL;
1422 if (!leases_enable)
1423 goto out;
1424
1425 locks_copy_lock(new_fl, lease);
1426 locks_insert_lock(before, new_fl);
1427
1428 *flp = new_fl;
1429 return 0;
1430
1431out:
1432 if (new_fl != NULL)
1433 locks_free_lock(new_fl);
1434 return error;
1435}
1436EXPORT_SYMBOL(generic_setlease);
1464
1465int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1466{
1467 int error;
1468
1469 lock_kernel();
1470 if (filp->f_op && filp->f_op->setlease)
1471 error = filp->f_op->setlease(filp, arg, lease);
1472 else
1473 error = generic_setlease(filp, arg, lease);
1474 unlock_kernel();
1475
1476 return error;
1477}
1478EXPORT_SYMBOL_GPL(vfs_setlease);
1489
1490int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1491{
1492 struct file_lock fl, *flp = &fl;
1493 struct inode *inode = filp->f_path.dentry->d_inode;
1494 int error;
1495
1496 locks_init_lock(&fl);
1497 error = lease_init(filp, arg, &fl);
1498 if (error)
1499 return error;
1500
1501 lock_kernel();
1502
1503 error = vfs_setlease(filp, arg, &flp);
1504 if (error || arg == F_UNLCK)
1505 goto out_unlock;
1506
1507 error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
1508 if (error < 0) {
1509
1510 flp->fl_type = F_UNLCK | F_INPROGRESS;
1511 flp->fl_break_time = jiffies - 10;
1512 time_out_leases(inode);
1513 goto out_unlock;
1514 }
1515
1516 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1517out_unlock:
1518 unlock_kernel();
1519 return error;
1520}
1528
1529int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1530{
1531 int error;
1532 might_sleep();
1533 for (;;) {
1534 error = flock_lock_file(filp, fl);
1535 if (error != FILE_LOCK_DEFERRED)
1536 break;
1537 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1538 if (!error)
1539 continue;
1540
1541 locks_delete_block(fl);
1542 break;
1543 }
1544 return error;
1545}
1546
1547EXPORT_SYMBOL(flock_lock_file_wait);
1567
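/*
 * sys_flock: the flock(2) system call.
 *
 * Apply or remove an advisory FL_FLOCK lock on an open file: LOCK_SH
 * and LOCK_EX request shared and exclusive locks, LOCK_UN releases, and
 * LOCK_NB makes the request non-blocking.  Files whose f_op provides a
 * ->flock() method handle the request themselves.
 */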
1568SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1569{
1570 struct file *filp;
1571 struct file_lock *lock;
1572 int can_sleep, unlock;
1573 int error;
1574
1575 error = -EBADF;
1576 filp = fget(fd);
1577 if (!filp)
1578 goto out;
1579
1580 can_sleep = !(cmd & LOCK_NB);
1581 cmd &= ~LOCK_NB;
1582 unlock = (cmd == LOCK_UN);
1583
1584 if (!unlock && !(cmd & LOCK_MAND) &&
1585 !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1586 goto out_putf;
1587
1588 error = flock_make_lock(filp, &lock, cmd);
1589 if (error)
1590 goto out_putf;
1591 if (can_sleep)
1592 lock->fl_flags |= FL_SLEEP;
1593
1594 error = security_file_lock(filp, lock->fl_type);
1595 if (error)
1596 goto out_free;
1597
1598 if (filp->f_op && filp->f_op->flock)
1599 error = filp->f_op->flock(filp,
1600 (can_sleep) ? F_SETLKW : F_SETLK,
1601 lock);
1602 else
1603 error = flock_lock_file_wait(filp, lock);
1604
1605 out_free:
1606 locks_free_lock(lock);
1607
1608 out_putf:
1609 fput(filp);
1610 out:
1611 return error;
1612}
1621
1622int vfs_test_lock(struct file *filp, struct file_lock *fl)
1623{
1624 if (filp->f_op && filp->f_op->lock)
1625 return filp->f_op->lock(filp, F_GETLK, fl);
1626 posix_test_lock(filp, fl);
1627 return 0;
1628}
1629EXPORT_SYMBOL_GPL(vfs_test_lock);
1630
1631static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1632{
1633 flock->l_pid = fl->fl_pid;
1634#if BITS_PER_LONG == 32
1635
1636
1637
1638
1639 if (fl->fl_start > OFFT_OFFSET_MAX)
1640 return -EOVERFLOW;
1641 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1642 return -EOVERFLOW;
1643#endif
1644 flock->l_start = fl->fl_start;
1645 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1646 fl->fl_end - fl->fl_start + 1;
1647 flock->l_whence = 0;
1648 flock->l_type = fl->fl_type;
1649 return 0;
1650}
1651
1652#if BITS_PER_LONG == 32
1653static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1654{
1655 flock->l_pid = fl->fl_pid;
1656 flock->l_start = fl->fl_start;
1657 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1658 fl->fl_end - fl->fl_start + 1;
1659 flock->l_whence = 0;
1660 flock->l_type = fl->fl_type;
1661}
1662#endif
1663
1664
1665
1666
1667int fcntl_getlk(struct file *filp, struct flock __user *l)
1668{
1669 struct file_lock file_lock;
1670 struct flock flock;
1671 int error;
1672
1673 error = -EFAULT;
1674 if (copy_from_user(&flock, l, sizeof(flock)))
1675 goto out;
1676 error = -EINVAL;
1677 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1678 goto out;
1679
1680 error = flock_to_posix_lock(filp, &file_lock, &flock);
1681 if (error)
1682 goto out;
1683
1684 error = vfs_test_lock(filp, &file_lock);
1685 if (error)
1686 goto out;
1687
1688 flock.l_type = file_lock.fl_type;
1689 if (file_lock.fl_type != F_UNLCK) {
1690 error = posix_lock_to_flock(&flock, &file_lock);
1691 if (error)
1692 goto out;
1693 }
1694 error = -EFAULT;
1695 if (!copy_to_user(l, &flock, sizeof(flock)))
1696 error = 0;
1697out:
1698 return error;
1699}
1733
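/*
 * vfs_lock_file - byte-range lock operation
 *
 * Hand the F_SETLK/F_SETLKW request to the filesystem's ->lock() method
 * if it defines one, otherwise apply a local POSIX lock.
 */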
1734int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1735{
1736 if (filp->f_op && filp->f_op->lock)
1737 return filp->f_op->lock(filp, cmd, fl);
1738 else
1739 return posix_lock_file(filp, fl, conf);
1740}
1741EXPORT_SYMBOL_GPL(vfs_lock_file);
1742
1743static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1744 struct file_lock *fl)
1745{
1746 int error;
1747
1748 error = security_file_lock(filp, fl->fl_type);
1749 if (error)
1750 return error;
1751
1752 for (;;) {
1753 error = vfs_lock_file(filp, cmd, fl, NULL);
1754 if (error != FILE_LOCK_DEFERRED)
1755 break;
1756 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1757 if (!error)
1758 continue;
1759
1760 locks_delete_block(fl);
1761 break;
1762 }
1763
1764 return error;
1765}
1766
1767
1768
1769
1770int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1771 struct flock __user *l)
1772{
1773 struct file_lock *file_lock = locks_alloc_lock();
1774 struct flock flock;
1775 struct inode *inode;
1776 struct file *f;
1777 int error;
1778
1779 if (file_lock == NULL)
1780 return -ENOLCK;
1781
1782
1783
1784
1785 error = -EFAULT;
1786 if (copy_from_user(&flock, l, sizeof(flock)))
1787 goto out;
1788
1789 inode = filp->f_path.dentry->d_inode;
1790
1791
1792
1793
1794 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1795 error = -EAGAIN;
1796 goto out;
1797 }
1798
1799again:
1800 error = flock_to_posix_lock(filp, file_lock, &flock);
1801 if (error)
1802 goto out;
1803 if (cmd == F_SETLKW) {
1804 file_lock->fl_flags |= FL_SLEEP;
1805 }
1806
1807 error = -EBADF;
1808 switch (flock.l_type) {
1809 case F_RDLCK:
1810 if (!(filp->f_mode & FMODE_READ))
1811 goto out;
1812 break;
1813 case F_WRLCK:
1814 if (!(filp->f_mode & FMODE_WRITE))
1815 goto out;
1816 break;
1817 case F_UNLCK:
1818 break;
1819 default:
1820 error = -EINVAL;
1821 goto out;
1822 }
1823
1824 error = do_lock_file_wait(filp, cmd, file_lock);
1834
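	/*
	 * Detect a close/fcntl race: if the descriptor was closed (and
	 * possibly reused) while we waited for the lock, undo the lock we
	 * just acquired.
	 */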
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
1838 if (!error && f != filp && flock.l_type != F_UNLCK) {
1839 flock.l_type = F_UNLCK;
1840 goto again;
1841 }
1842
1843out:
1844 locks_free_lock(file_lock);
1845 return error;
1846}
1847
1848#if BITS_PER_LONG == 32
1849
1850
1851
1852int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1853{
1854 struct file_lock file_lock;
1855 struct flock64 flock;
1856 int error;
1857
1858 error = -EFAULT;
1859 if (copy_from_user(&flock, l, sizeof(flock)))
1860 goto out;
1861 error = -EINVAL;
1862 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1863 goto out;
1864
1865 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1866 if (error)
1867 goto out;
1868
1869 error = vfs_test_lock(filp, &file_lock);
1870 if (error)
1871 goto out;
1872
1873 flock.l_type = file_lock.fl_type;
1874 if (file_lock.fl_type != F_UNLCK)
1875 posix_lock_to_flock64(&flock, &file_lock);
1876
1877 error = -EFAULT;
1878 if (!copy_to_user(l, &flock, sizeof(flock)))
1879 error = 0;
1880
1881out:
1882 return error;
1883}
1884
1885
1886
1887
1888int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1889 struct flock64 __user *l)
1890{
1891 struct file_lock *file_lock = locks_alloc_lock();
1892 struct flock64 flock;
1893 struct inode *inode;
1894 struct file *f;
1895 int error;
1896
1897 if (file_lock == NULL)
1898 return -ENOLCK;
1899
1900
1901
1902
1903 error = -EFAULT;
1904 if (copy_from_user(&flock, l, sizeof(flock)))
1905 goto out;
1906
1907 inode = filp->f_path.dentry->d_inode;
1908
1909
1910
1911
1912 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1913 error = -EAGAIN;
1914 goto out;
1915 }
1916
1917again:
1918 error = flock64_to_posix_lock(filp, file_lock, &flock);
1919 if (error)
1920 goto out;
1921 if (cmd == F_SETLKW64) {
1922 file_lock->fl_flags |= FL_SLEEP;
1923 }
1924
1925 error = -EBADF;
1926 switch (flock.l_type) {
1927 case F_RDLCK:
1928 if (!(filp->f_mode & FMODE_READ))
1929 goto out;
1930 break;
1931 case F_WRLCK:
1932 if (!(filp->f_mode & FMODE_WRITE))
1933 goto out;
1934 break;
1935 case F_UNLCK:
1936 break;
1937 default:
1938 error = -EINVAL;
1939 goto out;
1940 }
1941
1942 error = do_lock_file_wait(filp, cmd, file_lock);
1943
1944
1945
1946
1947
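	/*
	 * Detect a close/fcntl race: if the descriptor was closed (and
	 * possibly reused) while we waited for the lock, undo the lock we
	 * just acquired.
	 */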
	spin_lock(&current->files->file_lock);
	f = fcheck(fd);
	spin_unlock(&current->files->file_lock);
1951 if (!error && f != filp && flock.l_type != F_UNLCK) {
1952 flock.l_type = F_UNLCK;
1953 goto again;
1954 }
1955
1956out:
1957 locks_free_lock(file_lock);
1958 return error;
1959}
1960#endif
1961
1962
1963
1964
1965
1966
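/*
 * Remove all POSIX locks that @owner holds on @filp.  Called when a
 * file is being closed; implemented as a whole-file F_UNLCK request
 * with FL_CLOSE set.
 */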
1967void locks_remove_posix(struct file *filp, fl_owner_t owner)
1968{
1969 struct file_lock lock;
1970
1971
1972
1973
1974
1975
1976 if (!filp->f_path.dentry->d_inode->i_flock)
1977 return;
1978
1979 lock.fl_type = F_UNLCK;
1980 lock.fl_flags = FL_POSIX | FL_CLOSE;
1981 lock.fl_start = 0;
1982 lock.fl_end = OFFSET_MAX;
1983 lock.fl_owner = owner;
1984 lock.fl_pid = current->tgid;
1985 lock.fl_file = filp;
1986 lock.fl_ops = NULL;
1987 lock.fl_lmops = NULL;
1988
1989 vfs_lock_file(filp, F_SETLK, &lock, NULL);
1990
1991 if (lock.fl_ops && lock.fl_ops->fl_release_private)
1992 lock.fl_ops->fl_release_private(&lock);
1993}
1994
1995EXPORT_SYMBOL(locks_remove_posix);
1996
1997
1998
1999
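/*
 * Remove any flock locks and leases that @filp holds on its inode.
 * Called when the last reference to the open file is released.
 */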
2000void locks_remove_flock(struct file *filp)
2001{
2002 struct inode * inode = filp->f_path.dentry->d_inode;
2003 struct file_lock *fl;
2004 struct file_lock **before;
2005
2006 if (!inode->i_flock)
2007 return;
2008
2009 if (filp->f_op && filp->f_op->flock) {
2010 struct file_lock fl = {
2011 .fl_pid = current->tgid,
2012 .fl_file = filp,
2013 .fl_flags = FL_FLOCK,
2014 .fl_type = F_UNLCK,
2015 .fl_end = OFFSET_MAX,
2016 };
2017 filp->f_op->flock(filp, F_SETLKW, &fl);
2018 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2019 fl.fl_ops->fl_release_private(&fl);
2020 }
2021
2022 lock_kernel();
2023 before = &inode->i_flock;
2024
2025 while ((fl = *before) != NULL) {
2026 if (fl->fl_file == filp) {
2027 if (IS_FLOCK(fl)) {
2028 locks_delete_lock(before);
2029 continue;
2030 }
2031 if (IS_LEASE(fl)) {
2032 lease_modify(before, F_UNLCK);
2033 continue;
2034 }
2035
2036 BUG();
2037 }
2038 before = &fl->fl_next;
2039 }
2040 unlock_kernel();
2041}
2049
2050int
2051posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2052{
2053 int status = 0;
2054
2055 lock_kernel();
2056 if (waiter->fl_next)
2057 __locks_delete_block(waiter);
2058 else
2059 status = -ENOENT;
2060 unlock_kernel();
2061 return status;
2062}
2063
2064EXPORT_SYMBOL(posix_unblock_lock);
2072
2073int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2074{
2075 if (filp->f_op && filp->f_op->lock)
2076 return filp->f_op->lock(filp, F_CANCELLK, fl);
2077 return 0;
2078}
2079
2080EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2081
2082#ifdef CONFIG_PROC_FS
2083#include <linux/proc_fs.h>
2084#include <linux/seq_file.h>
2085
2086static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2087 int id, char *pfx)
2088{
2089 struct inode *inode = NULL;
2090 unsigned int fl_pid;
2091
2092 if (fl->fl_nspid)
2093 fl_pid = pid_vnr(fl->fl_nspid);
2094 else
2095 fl_pid = fl->fl_pid;
2096
2097 if (fl->fl_file != NULL)
2098 inode = fl->fl_file->f_path.dentry->d_inode;
2099
2100 seq_printf(f, "%d:%s ", id, pfx);
2101 if (IS_POSIX(fl)) {
2102 seq_printf(f, "%6s %s ",
2103 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2104 (inode == NULL) ? "*NOINODE*" :
2105 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2106 } else if (IS_FLOCK(fl)) {
2107 if (fl->fl_type & LOCK_MAND) {
2108 seq_printf(f, "FLOCK MSNFS ");
2109 } else {
2110 seq_printf(f, "FLOCK ADVISORY ");
2111 }
2112 } else if (IS_LEASE(fl)) {
2113 seq_printf(f, "LEASE ");
2114 if (fl->fl_type & F_INPROGRESS)
2115 seq_printf(f, "BREAKING ");
2116 else if (fl->fl_file)
2117 seq_printf(f, "ACTIVE ");
2118 else
2119 seq_printf(f, "BREAKER ");
2120 } else {
2121 seq_printf(f, "UNKNOWN UNKNOWN ");
2122 }
2123 if (fl->fl_type & LOCK_MAND) {
2124 seq_printf(f, "%s ",
2125 (fl->fl_type & LOCK_READ)
2126 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2127 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2128 } else {
2129 seq_printf(f, "%s ",
2130 (fl->fl_type & F_INPROGRESS)
2131 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2132 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2133 }
2134 if (inode) {
2135#ifdef WE_CAN_BREAK_LSLK_NOW
2136 seq_printf(f, "%d %s:%ld ", fl_pid,
2137 inode->i_sb->s_id, inode->i_ino);
2138#else
2139
2140 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2141 MAJOR(inode->i_sb->s_dev),
2142 MINOR(inode->i_sb->s_dev), inode->i_ino);
2143#endif
2144 } else {
2145 seq_printf(f, "%d <none>:0 ", fl_pid);
2146 }
2147 if (IS_POSIX(fl)) {
2148 if (fl->fl_end == OFFSET_MAX)
2149 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2150 else
2151 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2152 } else {
2153 seq_printf(f, "0 EOF\n");
2154 }
2155}
2156
2157static int locks_show(struct seq_file *f, void *v)
2158{
2159 struct file_lock *fl, *bfl;
2160
2161 fl = list_entry(v, struct file_lock, fl_link);
2162
2163 lock_get_status(f, fl, (long)f->private, "");
2164
2165 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2166 lock_get_status(f, bfl, (long)f->private, " ->");
2167
2168 f->private++;
2169 return 0;
2170}
2171
2172static void *locks_start(struct seq_file *f, loff_t *pos)
2173{
2174 lock_kernel();
2175 f->private = (void *)1;
2176 return seq_list_start(&file_lock_list, *pos);
2177}
2178
2179static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2180{
2181 return seq_list_next(v, &file_lock_list, pos);
2182}
2183
2184static void locks_stop(struct seq_file *f, void *v)
2185{
2186 unlock_kernel();
2187}
2188
2189static const struct seq_operations locks_seq_operations = {
2190 .start = locks_start,
2191 .next = locks_next,
2192 .stop = locks_stop,
2193 .show = locks_show,
2194};
2195
2196static int locks_open(struct inode *inode, struct file *filp)
2197{
2198 return seq_open(filp, &locks_seq_operations);
2199}
2200
2201static const struct file_operations proc_locks_operations = {
2202 .open = locks_open,
2203 .read = seq_read,
2204 .llseek = seq_lseek,
2205 .release = seq_release,
2206};
2207
2208static int __init proc_locks_init(void)
2209{
2210 proc_create("locks", 0, NULL, &proc_locks_operations);
2211 return 0;
2212}
2213module_init(proc_locks_init);
2214#endif
2228
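/*
 * lock_may_read - check whether mandatory locking forbids reading the
 * byte range starting at @start of length @len.  Returns 1 if reading
 * is allowed, 0 if an overlapping POSIX write lock or a mandatory
 * (LOCK_MAND) flock lock without LOCK_READ is present.
 */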
2229int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2230{
2231 struct file_lock *fl;
2232 int result = 1;
2233 lock_kernel();
2234 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2235 if (IS_POSIX(fl)) {
2236 if (fl->fl_type == F_RDLCK)
2237 continue;
2238 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2239 continue;
2240 } else if (IS_FLOCK(fl)) {
2241 if (!(fl->fl_type & LOCK_MAND))
2242 continue;
2243 if (fl->fl_type & LOCK_READ)
2244 continue;
2245 } else
2246 continue;
2247 result = 0;
2248 break;
2249 }
2250 unlock_kernel();
2251 return result;
2252}
2253
2254EXPORT_SYMBOL(lock_may_read);
2268
2269int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2270{
2271 struct file_lock *fl;
2272 int result = 1;
2273 lock_kernel();
2274 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2275 if (IS_POSIX(fl)) {
2276 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2277 continue;
2278 } else if (IS_FLOCK(fl)) {
2279 if (!(fl->fl_type & LOCK_MAND))
2280 continue;
2281 if (fl->fl_type & LOCK_WRITE)
2282 continue;
2283 } else
2284 continue;
2285 result = 0;
2286 break;
2287 }
2288 unlock_kernel();
2289 return result;
2290}
2291
2292EXPORT_SYMBOL(lock_may_write);
2293
2294static int __init filelock_init(void)
2295{
2296 filelock_cache = kmem_cache_create("file_lock_cache",
2297 sizeof(struct file_lock), 0, SLAB_PANIC,
2298 init_once);
2299 return 0;
2300}
2301
2302core_initcall(filelock_init);
2303