1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
/* Lock-type predicates: classify a file_lock by its fl_flags. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

/* Sysctl tunables: whether leases may be granted at all, and how many
 * seconds a lease holder gets to comply after a break is signalled. */
int leases_enable = 1;
int lease_break_time = 45;

/* Walk every lock chained off an inode; lockp is a file_lock ** so an
 * entry can be unlinked in place during iteration. */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/* Global list of all active locks, list of blocked POSIX waiters (used
 * for deadlock detection), and the spinlock guarding all of the above. */
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);
145
146
147
148
/* Acquire the global file-lock spinlock.  All lock-list manipulation in
 * this file happens under this single lock. */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
154
/* Release the global file-lock spinlock taken by lock_flocks(). */
void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
160
/* Slab cache from which all struct file_lock objects are allocated. */
static struct kmem_cache *filelock_cache __read_mostly;

/* Initialise the list linkage and wait queue embedded in a file_lock. */
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}
169
170
171struct file_lock *locks_alloc_lock(void)
172{
173 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
174
175 if (fl)
176 locks_init_lock_heads(fl);
177
178 return fl;
179}
180EXPORT_SYMBOL_GPL(locks_alloc_lock);
181
182void locks_release_private(struct file_lock *fl)
183{
184 if (fl->fl_ops) {
185 if (fl->fl_ops->fl_release_private)
186 fl->fl_ops->fl_release_private(fl);
187 fl->fl_ops = NULL;
188 }
189 if (fl->fl_lmops) {
190 if (fl->fl_lmops->lm_release_private)
191 fl->fl_lmops->lm_release_private(fl);
192 fl->fl_lmops = NULL;
193 }
194
195}
196EXPORT_SYMBOL_GPL(locks_release_private);
197
198
/* Free a lock which is no longer in use.  The BUG_ONs assert that no
 * task is still sleeping on it and it is unlinked from every list. */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
209
210void locks_init_lock(struct file_lock *fl)
211{
212 memset(fl, 0, sizeof(struct file_lock));
213 locks_init_lock_heads(fl);
214}
215
216EXPORT_SYMBOL(locks_init_lock);
217
218static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
219{
220 if (fl->fl_ops) {
221 if (fl->fl_ops->fl_copy_lock)
222 fl->fl_ops->fl_copy_lock(new, fl);
223 new->fl_ops = fl->fl_ops;
224 }
225 if (fl->fl_lmops)
226 new->fl_lmops = fl->fl_lmops;
227}
228
229
230
231
232void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
233{
234 new->fl_owner = fl->fl_owner;
235 new->fl_pid = fl->fl_pid;
236 new->fl_file = NULL;
237 new->fl_flags = fl->fl_flags;
238 new->fl_type = fl->fl_type;
239 new->fl_start = fl->fl_start;
240 new->fl_end = fl->fl_end;
241 new->fl_ops = NULL;
242 new->fl_lmops = NULL;
243}
244EXPORT_SYMBOL(__locks_copy_lock);
245
246void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
247{
248 locks_release_private(new);
249
250 __locks_copy_lock(new, fl);
251 new->fl_file = fl->fl_file;
252 new->fl_ops = fl->fl_ops;
253 new->fl_lmops = fl->fl_lmops;
254
255 locks_copy_private(new, fl);
256}
257
258EXPORT_SYMBOL(locks_copy_lock);
259
260static inline int flock_translate_cmd(int cmd) {
261 if (cmd & LOCK_MAND)
262 return cmd & (LOCK_MAND | LOCK_RW);
263 switch (cmd) {
264 case LOCK_SH:
265 return F_RDLCK;
266 case LOCK_EX:
267 return F_WRLCK;
268 case LOCK_UN:
269 return F_UNLCK;
270 }
271 return -EINVAL;
272}
273
274
275static int flock_make_lock(struct file *filp, struct file_lock **lock,
276 unsigned int cmd)
277{
278 struct file_lock *fl;
279 int type = flock_translate_cmd(cmd);
280 if (type < 0)
281 return type;
282
283 fl = locks_alloc_lock();
284 if (fl == NULL)
285 return -ENOMEM;
286
287 fl->fl_file = filp;
288 fl->fl_pid = current->tgid;
289 fl->fl_flags = FL_FLOCK;
290 fl->fl_type = type;
291 fl->fl_end = OFFSET_MAX;
292
293 *lock = fl;
294 return 0;
295}
296
297static int assign_type(struct file_lock *fl, int type)
298{
299 switch (type) {
300 case F_RDLCK:
301 case F_WRLCK:
302 case F_UNLCK:
303 fl->fl_type = type;
304 break;
305 default:
306 return -EINVAL;
307 }
308 return 0;
309}
310
311
312
313
/* Verify a userspace struct flock and convert it into a POSIX-style
 * struct file_lock on filp.  l_start is interpreted relative to
 * l_whence; l_len > 0 locks [start, start+len-1], l_len == 0 locks to
 * EOF, and l_len < 0 locks the range ending just before start. */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	/* Resolve l_whence into an absolute starting offset. */
	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* NOTE(review): start + l->l_start (and the end computations
	 * below) can overflow off_t before the fl_end < fl_start check
	 * runs — confirm callers bound these values. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;	/* default: lock to end of file */
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		/* Negative l_len: range ends just before start. */
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
362
#if BITS_PER_LONG == 32
/* 64-bit variant of flock_to_posix_lock(); only needed on 32-bit
 * kernels where off_t is narrower than loff_t. */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	/* Resolve l_whence into an absolute starting offset. */
	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;	/* default: lock to end of file */
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		/* Negative l_len: range ends just before start. */
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
409
410
/* lm_break callback for leases: deliver SIGIO (POLL_MSG) through the
 * lease's fasync entry to tell the holder the lease is being broken. */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
415
416static void lease_release_private_callback(struct file_lock *fl)
417{
418 if (!fl->fl_file)
419 return;
420
421 f_delown(fl->fl_file);
422 fl->fl_file->f_owner.signum = 0;
423}
424
/* Lock-manager callbacks shared by every lease created in this file. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_release_private = lease_release_private_callback,
	.lm_change = lease_modify,
};
430
431
432
433
434static int lease_init(struct file *filp, int type, struct file_lock *fl)
435 {
436 if (assign_type(fl, type) != 0)
437 return -EINVAL;
438
439 fl->fl_owner = current->files;
440 fl->fl_pid = current->tgid;
441
442 fl->fl_file = filp;
443 fl->fl_flags = FL_LEASE;
444 fl->fl_start = 0;
445 fl->fl_end = OFFSET_MAX;
446 fl->fl_ops = NULL;
447 fl->fl_lmops = &lease_manager_ops;
448 return 0;
449}
450
451
452static struct file_lock *lease_alloc(struct file *filp, int type)
453{
454 struct file_lock *fl = locks_alloc_lock();
455 int error = -ENOMEM;
456
457 if (fl == NULL)
458 return ERR_PTR(error);
459
460 error = lease_init(filp, type, fl);
461 if (error) {
462 locks_free_lock(fl);
463 return ERR_PTR(error);
464 }
465 return fl;
466}
467
468
469
470static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
471{
472 return ((fl1->fl_end >= fl2->fl_start) &&
473 (fl2->fl_end >= fl1->fl_start));
474}
475
476
477
478
479static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
480{
481 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
482 return fl2->fl_lmops == fl1->fl_lmops &&
483 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
484 return fl1->fl_owner == fl2->fl_owner;
485}
486
487
488
489
490static void __locks_delete_block(struct file_lock *waiter)
491{
492 list_del_init(&waiter->fl_block);
493 list_del_init(&waiter->fl_link);
494 waiter->fl_next = NULL;
495}
496
497
498
/* Locked wrapper around __locks_delete_block(); used by waiters that
 * were interrupted or timed out and must dequeue themselves. */
static void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
505
506
507
508
509
510
/* Queue waiter behind blocker.  Waiters are appended so they wake in
 * FIFO order.  Only waiters blocked on POSIX locks also go on the
 * global blocked_list, which feeds deadlock detection.  Caller must
 * hold file_lock_lock. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
520
521
522
523
524
/* Wake every process queued behind blocker.  Lock managers get their
 * lm_notify callback; ordinary waiters sleeping on fl_wait are woken
 * directly.  Caller must hold file_lock_lock. */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
539
540
541
542
/* Insert fl into the inode's lock chain at position pos and onto the
 * global file_lock_list.  The owning tgid's struct pid is pinned for
 * pid-namespace-aware reporting.  Caller must hold file_lock_lock. */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into the file's chain at *pos */
	fl->fl_next = *pos;
	*pos = fl;
}
553
554
555
556
557
558
559
/* Unlink a lock from its inode chain and the global list, detach its
 * fasync entries (leases use these for SIGIO), wake anyone blocked on
 * it, and free it.  Caller must hold file_lock_lock. */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		/* fasync_helper() should have emptied the list. */
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
582
583
584
585
586static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
587{
588 if (sys_fl->fl_type == F_WRLCK)
589 return 1;
590 if (caller_fl->fl_type == F_WRLCK)
591 return 1;
592 return 0;
593}
594
595
596
597
/* Would the POSIX lock sys_fl block the request caller_fl?  Locks held
 * by the same owner never conflict, nor do non-overlapping ranges. */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}
612
613
614
615
616static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
617{
618
619
620
621 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
622 return (0);
623 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
624 return 0;
625
626 return (locks_conflict(caller_fl, sys_fl));
627}
628
/**
 * posix_test_lock - test for the existence of a conflicting POSIX lock
 * @filp: the file to check
 * @fl: lock to test for; on return describes the first conflicting lock
 *      found, or has fl_type set to F_UNLCK if none conflicts
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		/* Report the holder's pid as seen from this pid namespace. */
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
/* Bound on the wait-for chain walk in posix_locks_deadlock(). */
#define MAX_DEADLK_ITERATIONS 10

/* Return a lock that the owner of block_fl is itself blocked on, by
 * scanning the global list of blocked POSIX waiters; NULL if that
 * owner is not waiting for anything. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
690
/* Would blocking caller_fl on block_fl close a cycle of owners each
 * waiting on the next?  Walks the wait-for chain; gives up (reporting
 * no deadlock) after MAX_DEADLK_ITERATIONS steps to bound the search. */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
704
705
706
707
708
709
710
711
/* Try to create, convert or remove a FLOCK-style lock on the file.
 * Returns FILE_LOCK_DEFERRED when the caller asked to sleep (FL_SLEEP)
 * and was queued behind a conflicting lock.  FL_ACCESS requests only
 * probe for conflicts without installing anything. */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	/* Preallocate outside the spinlock; unlocks need no new lock. */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* Remove an existing flock lock taken via this same struct file;
	 * an identical-type lock means there is nothing to change. */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If we removed our old lock, drop the spinlock briefly so any
	 * process that was blocked on it gets a chance to run before we
	 * install the replacement.
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		/* Queue behind the conflicting lock; caller will wait. */
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;		/* ownership transferred to the inode chain */
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
790
/* Core POSIX lock engine: apply request to the inode's lock list,
 * merging, splitting, replacing or deleting regions as needed.  If
 * conflock is non-NULL and a conflicting lock is found, a copy of it
 * is returned there.  Returns FILE_LOCK_DEFERRED when the request was
 * queued behind a conflicting lock (FL_SLEEP set). */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need up to two file_lock structures (one insert plus one
	 * split), so allocate them in advance, outside the spinlock.
	 * A pure FL_ACCESS probe or a whole-file unlock can never need a
	 * new lock, so skip the allocation in those cases.  Allocation
	 * failure is not checked here: it is only fatal if a new lock
	 * actually turns out to be required (see the !new_fl / !new_fl2
	 * checks below).
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		/* Scan for a conflicting lock held by another owner. */
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Find the first old lock with the same owner as the new lock.
	 */
	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type). */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1": if end is
			 * OFFSET_MAX, end + 1 would become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* The new and old lock are of the same type and
			 * adjacent or overlapping: grow one lock spanning
			 * from the lower start to the higher end of both.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				/* Already merged into an earlier lock;
				 * this one is now redundant. */
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		}
		else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;	/* survives to the left */
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;	/* survives to the right */
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (this may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}
		/* Go on to the next lock. */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The above code only modifies existing locks in case of merging
	 * or replacing.  If new lock(s) need to be inserted, all list
	 * modifications happen below this point, so it is still safe to
	 * bail out on allocation failure.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			/* Unlocking a range nothing was locked in. */
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second preallocated lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();
	/*
	 * Free any unused preallocated locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found
 *
 * Regions are merged and split as necessary.  Passing a conflock avoids
 * a separate F_GETLK-style query after a failed lock attempt.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file, sleeping
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Retries while the request is deferred; an interrupted waiter removes
 * itself from the blocker's queue before the error is returned.
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the blocker releases us. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1041
1042
1043
1044
1045
1046
1047
1048
/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's lock list for any POSIX lock held by an owner
 * other than the current process.  Returns -EAGAIN if one exists,
 * 0 otherwise.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		/* First POSIX lock with a foreign owner => conflict. */
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
/**
 * locks_mandatory_area - Check for a conflicting lock over a byte range
 * @read_write: FLOCK_VERIFY_WRITE for exclusive access, otherwise shared
 * @inode: the file to check
 * @filp: how the file was opened (may be NULL)
 * @offset: start of the area to check
 * @count: length of the area to check
 *
 * Probes the range with an FL_ACCESS lock; sleeps on conflicts unless
 * the file was opened O_NONBLOCK.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1121
1122
/* lease_modify - change the type of an existing lease
 * @before: position of the lease in its inode's lock list
 * @arg: new type (F_RDLCK/F_WRLCK/F_UNLCK)
 *
 * Wakes anyone blocked on the lease; F_UNLCK removes it entirely.
 * Also serves as the lm_change callback in lease_manager_ops.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK)
		locks_delete_lock(before);
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1137
/* Downgrade or remove any in-progress lease break whose deadline has
 * passed.  Leases sit at the front of the inode's chain; F_INPROGRESS
 * marks one currently being broken, and fl_break_time == 0 means no
 * deadline was set.  Caller must hold file_lock_lock. */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		/* lease_modify() may have unlinked and freed the entry;
		 * only advance if it is still in place. */
		if (fl == *before)
			before = &fl->fl_next;
	}
}
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
/**
 * __break_lease - revoke all outstanding leases on a file
 * @inode: the inode of the file
 * @mode: the open mode (read or write)
 *
 * Leases are broken on open() or truncate().  A write open revokes all
 * leases; a read open only downgrades an exclusive lease.  Sleeps
 * waiting for the holders unless @mode has %O_NONBLOCK set.
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	/* Placeholder lock to queue behind the lease while waiting.
	 * May be an ERR_PTR; that only matters if we actually must block. */
	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);

	lock_flocks();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (want_write) {
		/* A write open revokes every lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* A break is already in progress; keep its target type. */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* The existing lease is read-only, so we can read too. */
		goto out;
	}

	if (IS_ERR(new_fl) && !i_have_this_lease
			&& ((mode & O_NONBLOCK) == 0)) {
		error = PTR_ERR(new_fl);
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* 0 is reserved for "no deadline" */
	}

	/* Mark every lease as breaking and notify each holder. */
	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			/* leases always have an lm_break callback */
			fl->fl_lmops->lm_break(fl);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)		/* timed out: force expiry */
			time_out_leases(inode);
		/* Wait again for any lease that has not been broken yet. */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	if (!IS_ERR(new_fl))
		locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273void lease_get_mtime(struct inode *inode, struct timespec *time)
1274{
1275 struct file_lock *flock = inode->i_flock;
1276 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1277 *time = current_fs_time(inode->i_sb);
1278 else
1279 *time = inode->i_mtime;
1280}
1281
1282EXPORT_SYMBOL(lease_get_mtime);
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * Returns the type of the lease held via this filp, after expiring any
 * timed-out lease breaks: %F_RDLCK, %F_WRLCK, or %F_UNLCK if no lease
 * is held.  The F_INPROGRESS bit is masked off, so while a break is
 * pending this reports the type the lease is being reduced to.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_flocks();
	time_out_leases(filp->f_path.dentry->d_inode);
	for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_flocks();
	return type;
}
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
/**
 * generic_setlease - set a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @flp: input - file_lock to use; output - the file_lock in effect
 *
 * The (input) flp->fl_lmops->lm_break callback is required by
 * break_lease().  Called with file_lock_lock held (see vfs_setlease()).
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	lease = *flp;

	/* Only the file owner, or a task with CAP_LEASE, may set leases. */
	error = -EACCES;
	if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
		goto out;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode))
		goto out;
	error = security_file_lock(filp, arg);
	if (error)
		goto out;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	if (arg != F_UNLCK) {
		/* Refuse a lease while conflicting opens exist: writers
		 * block read leases; any other reference blocks a write
		 * lease. */
		error = -EAGAIN;
		if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
			goto out;
		if ((arg == F_WRLCK)
		    && ((dentry->d_count > 1)
			|| (atomic_read(&inode->i_count) > 1)))
			goto out;
	}

	/*
	 * At this point, we know that if there is an exclusive lease on
	 * this file, then we hold it on this filp (otherwise our open
	 * of this file would have blocked).  And if we are trying to
	 * acquire an exclusive lease, then the file is not open by
	 * anyone (including us) except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this file
			 * for writing, so we may not take an exclusive
			 * lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	error = -EAGAIN;
	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out;

	if (my_before != NULL) {
		/* We already hold a lease via this filp: modify it. */
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	if (arg == F_UNLCK)
		goto out;

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
EXPORT_SYMBOL(generic_setlease);
1419
1420static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1421{
1422 if (filp->f_op && filp->f_op->setlease)
1423 return filp->f_op->setlease(filp, arg, lease);
1424 else
1425 return generic_setlease(filp, arg, lease);
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
/**
 * vfs_setlease - set a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use
 *
 * Takes the global file-lock spinlock around the filesystem/generic
 * setlease operation.
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1466
/* Remove the calling filp's lease by issuing an F_UNLCK setlease.  An
 * F_UNLCK request never inserts a lock, so a stack-allocated file_lock
 * suffices. */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1475
/* Install a new lease of the given type on filp, wiring up the fasync
 * entry used to deliver the lease-break SIGIO and directing that signal
 * at the current task. */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	/* Preallocate the fasync entry before taking the spinlock. */
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* setlease may have modified an existing lease rather than
	 * inserting ours; free the unused copy in that case. */
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1531{
1532 if (arg == F_UNLCK)
1533 return do_fcntl_delete_lease(filp);
1534 return do_fcntl_add_lease(fd, filp, arg);
1535}
1536
1537
1538
1539
1540
1541
1542
1543
/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file, sleeping
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Retries while the request is deferred; an interrupted waiter removes
 * itself from the blocker's queue before the error is returned.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the blocker releases us. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply an %FL_FLOCK style lock to an open file descriptor.
 * @cmd is one of %LOCK_SH (shared), %LOCK_EX (exclusive), %LOCK_UN
 * (remove), or %LOCK_MAND (mandatory share mode); %LOCK_NB may be
 * ORed in to make the operation non-blocking.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* Taking a lock needs the file open for reading or writing,
	 * except for unlocks and mandatory share modes. */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(filp, lock->fl_type);
	if (error)
		goto out_free;

	/* Filesystems (e.g. network filesystems) may supply their own
	 * flock method; otherwise use the generic local implementation. */
	if (filp->f_op && filp->f_op->flock)
		error = filp->f_op->flock(filp,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(filp, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fput(filp);
 out:
	return error;
}
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637int vfs_test_lock(struct file *filp, struct file_lock *fl)
1638{
1639 if (filp->f_op && filp->f_op->lock)
1640 return filp->f_op->lock(filp, F_GETLK, fl);
1641 posix_test_lock(filp, fl);
1642 return 0;
1643}
1644EXPORT_SYMBOL_GPL(vfs_test_lock);
1645
/* Translate an internal file_lock back into a userspace struct flock.
 * Fails with -EOVERFLOW on 32-bit kernels if the range cannot be
 * represented in the legacy 32-bit off_t. */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* fl_end == OFFSET_MAX encodes "to end of file", i.e. l_len 0. */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
1666
#if BITS_PER_LONG == 32
/* 64-bit variant of posix_lock_to_flock(); struct flock64 can always
 * represent the range, so this cannot fail. */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	/* fl_end == OFFSET_MAX encodes "to end of file", i.e. l_len 0. */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
1678
1679
1680
1681
/* Report the first lock which blocks the lock described by *l (the
 * F_GETLK fcntl); if none conflicts, l_type is set to F_UNLCK in the
 * copy written back to userspace. */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		/* A conflicting lock was found: report its range/owner. */
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1750{
1751 if (filp->f_op && filp->f_op->lock)
1752 return filp->f_op->lock(filp, cmd, fl);
1753 else
1754 return posix_lock_file(filp, fl, conf);
1755}
1756EXPORT_SYMBOL_GPL(vfs_lock_file);
1757
/*
 * Apply a lock request, and for blocking commands wait until it is
 * granted.  FILE_LOCK_DEFERRED means the request was queued as blocked;
 * sleep until fl_next is cleared (the block was lifted) and retry.  If
 * the sleep is interrupted, remove ourselves from the blocked list and
 * return the -ERESTARTSYS/-EINTR style error from the wait.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			struct file_lock *fl)
{
	int error;

	/* Let the security module veto the request first. */
	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Deferred: wait until we are no longer on a blocker's list. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Interrupted while blocked: unlink from the blocked list. */
		locks_delete_block(fl);
		break;
	}

	return error;
}
1781
1782
1783
1784
1785int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1786 struct flock __user *l)
1787{
1788 struct file_lock *file_lock = locks_alloc_lock();
1789 struct flock flock;
1790 struct inode *inode;
1791 struct file *f;
1792 int error;
1793
1794 if (file_lock == NULL)
1795 return -ENOLCK;
1796
1797
1798
1799
1800 error = -EFAULT;
1801 if (copy_from_user(&flock, l, sizeof(flock)))
1802 goto out;
1803
1804 inode = filp->f_path.dentry->d_inode;
1805
1806
1807
1808
1809 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1810 error = -EAGAIN;
1811 goto out;
1812 }
1813
1814again:
1815 error = flock_to_posix_lock(filp, file_lock, &flock);
1816 if (error)
1817 goto out;
1818 if (cmd == F_SETLKW) {
1819 file_lock->fl_flags |= FL_SLEEP;
1820 }
1821
1822 error = -EBADF;
1823 switch (flock.l_type) {
1824 case F_RDLCK:
1825 if (!(filp->f_mode & FMODE_READ))
1826 goto out;
1827 break;
1828 case F_WRLCK:
1829 if (!(filp->f_mode & FMODE_WRITE))
1830 goto out;
1831 break;
1832 case F_UNLCK:
1833 break;
1834 default:
1835 error = -EINVAL;
1836 goto out;
1837 }
1838
1839 error = do_lock_file_wait(filp, cmd, file_lock);
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850 spin_lock(¤t->files->file_lock);
1851 f = fcheck(fd);
1852 spin_unlock(¤t->files->file_lock);
1853 if (!error && f != filp && flock.l_type != F_UNLCK) {
1854 flock.l_type = F_UNLCK;
1855 goto again;
1856 }
1857
1858out:
1859 locks_free_lock(file_lock);
1860 return error;
1861}
1862
1863#if BITS_PER_LONG == 32
1864
1865
1866
1867int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1868{
1869 struct file_lock file_lock;
1870 struct flock64 flock;
1871 int error;
1872
1873 error = -EFAULT;
1874 if (copy_from_user(&flock, l, sizeof(flock)))
1875 goto out;
1876 error = -EINVAL;
1877 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1878 goto out;
1879
1880 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1881 if (error)
1882 goto out;
1883
1884 error = vfs_test_lock(filp, &file_lock);
1885 if (error)
1886 goto out;
1887
1888 flock.l_type = file_lock.fl_type;
1889 if (file_lock.fl_type != F_UNLCK)
1890 posix_lock_to_flock64(&flock, &file_lock);
1891
1892 error = -EFAULT;
1893 if (!copy_to_user(l, &flock, sizeof(flock)))
1894 error = 0;
1895
1896out:
1897 return error;
1898}
1899
1900
1901
1902
1903int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1904 struct flock64 __user *l)
1905{
1906 struct file_lock *file_lock = locks_alloc_lock();
1907 struct flock64 flock;
1908 struct inode *inode;
1909 struct file *f;
1910 int error;
1911
1912 if (file_lock == NULL)
1913 return -ENOLCK;
1914
1915
1916
1917
1918 error = -EFAULT;
1919 if (copy_from_user(&flock, l, sizeof(flock)))
1920 goto out;
1921
1922 inode = filp->f_path.dentry->d_inode;
1923
1924
1925
1926
1927 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1928 error = -EAGAIN;
1929 goto out;
1930 }
1931
1932again:
1933 error = flock64_to_posix_lock(filp, file_lock, &flock);
1934 if (error)
1935 goto out;
1936 if (cmd == F_SETLKW64) {
1937 file_lock->fl_flags |= FL_SLEEP;
1938 }
1939
1940 error = -EBADF;
1941 switch (flock.l_type) {
1942 case F_RDLCK:
1943 if (!(filp->f_mode & FMODE_READ))
1944 goto out;
1945 break;
1946 case F_WRLCK:
1947 if (!(filp->f_mode & FMODE_WRITE))
1948 goto out;
1949 break;
1950 case F_UNLCK:
1951 break;
1952 default:
1953 error = -EINVAL;
1954 goto out;
1955 }
1956
1957 error = do_lock_file_wait(filp, cmd, file_lock);
1958
1959
1960
1961
1962
1963 spin_lock(¤t->files->file_lock);
1964 f = fcheck(fd);
1965 spin_unlock(¤t->files->file_lock);
1966 if (!error && f != filp && flock.l_type != F_UNLCK) {
1967 flock.l_type = F_UNLCK;
1968 goto again;
1969 }
1970
1971out:
1972 locks_free_lock(file_lock);
1973 return error;
1974}
1975#endif
1976
1977
1978
1979
1980
1981
1982void locks_remove_posix(struct file *filp, fl_owner_t owner)
1983{
1984 struct file_lock lock;
1985
1986
1987
1988
1989
1990
1991 if (!filp->f_path.dentry->d_inode->i_flock)
1992 return;
1993
1994 lock.fl_type = F_UNLCK;
1995 lock.fl_flags = FL_POSIX | FL_CLOSE;
1996 lock.fl_start = 0;
1997 lock.fl_end = OFFSET_MAX;
1998 lock.fl_owner = owner;
1999 lock.fl_pid = current->tgid;
2000 lock.fl_file = filp;
2001 lock.fl_ops = NULL;
2002 lock.fl_lmops = NULL;
2003
2004 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2005
2006 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2007 lock.fl_ops->fl_release_private(&lock);
2008}
2009
2010EXPORT_SYMBOL(locks_remove_posix);
2011
2012
2013
2014
/*
 * Called on the last close of @filp: remove any FLOCK locks and leases
 * still attached to the inode by this struct file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	/* Give the filesystem a chance to drop its own flock state first. */
	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	/*
	 * Walk the inode's lock list with a pointer-to-link so entries can
	 * be unlinked in place; after a deletion *before already points at
	 * the next entry, hence the bare "continue".
	 */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* Neither FLOCK nor LEASE should be left here. */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}
2057
2058
2059
2060
2061
2062
2063
2064
2065int
2066posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2067{
2068 int status = 0;
2069
2070 lock_flocks();
2071 if (waiter->fl_next)
2072 __locks_delete_block(waiter);
2073 else
2074 status = -ENOENT;
2075 unlock_flocks();
2076 return status;
2077}
2078
2079EXPORT_SYMBOL(posix_unblock_lock);
2080
2081
2082
2083
2084
2085
2086
2087
2088int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2089{
2090 if (filp->f_op && filp->f_op->lock)
2091 return filp->f_op->lock(filp, F_CANCELLK, fl);
2092 return 0;
2093}
2094
2095EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2096
2097#ifdef CONFIG_PROC_FS
2098#include <linux/proc_fs.h>
2099#include <linux/seq_file.h>
2100
/*
 * Format one lock as a line of /proc/locks output.  @id is the ordinal
 * shown in the first column; @pfx is "" for a held lock or " ->" for a
 * waiter blocked on the preceding line's lock.  The output format is
 * user-visible ABI — do not change the strings.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* Report the pid as seen from the owner's pid namespace if known. */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK  MSNFS     ");
		} else {
			seq_printf(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE  ");
		if (fl->fl_type & F_INPROGRESS)
			seq_printf(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE    ");
		else
			seq_printf(f, "BREAKER   ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN  ");
	}
	/* Third column: access mode of the lock. */
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (fl->fl_type & F_INPROGRESS)
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	/* Fourth column: pid plus device:inode of the locked file. */
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* Userspace (lslk) relies on the MAJOR:MINOR:ino form. */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	/* Final columns: byte range (POSIX only). */
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2171
2172static int locks_show(struct seq_file *f, void *v)
2173{
2174 struct file_lock *fl, *bfl;
2175
2176 fl = list_entry(v, struct file_lock, fl_link);
2177
2178 lock_get_status(f, fl, *((loff_t *)f->private), "");
2179
2180 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2181 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2182
2183 return 0;
2184}
2185
2186static void *locks_start(struct seq_file *f, loff_t *pos)
2187{
2188 loff_t *p = f->private;
2189
2190 lock_flocks();
2191 *p = (*pos + 1);
2192 return seq_list_start(&file_lock_list, *pos);
2193}
2194
2195static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2196{
2197 loff_t *p = f->private;
2198 ++*p;
2199 return seq_list_next(v, &file_lock_list, pos);
2200}
2201
/* Finish a /proc/locks read; pairs with the lock taken in locks_start(). */
static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}
2206
/* seq_file iterator over the global file_lock_list, backing /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start = locks_start,
	.next = locks_next,
	.stop = locks_stop,
	.show = locks_show,
};
2213
/*
 * Open /proc/locks.  The per-open private loff_t holds the ID counter
 * maintained by locks_start()/locks_next().
 */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
2218
/* File operations for /proc/locks; release frees the seq private data. */
static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2225
/* Register the read-only /proc/locks entry at boot. */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
2232#endif
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2248{
2249 struct file_lock *fl;
2250 int result = 1;
2251 lock_flocks();
2252 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2253 if (IS_POSIX(fl)) {
2254 if (fl->fl_type == F_RDLCK)
2255 continue;
2256 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2257 continue;
2258 } else if (IS_FLOCK(fl)) {
2259 if (!(fl->fl_type & LOCK_MAND))
2260 continue;
2261 if (fl->fl_type & LOCK_READ)
2262 continue;
2263 } else
2264 continue;
2265 result = 0;
2266 break;
2267 }
2268 unlock_flocks();
2269 return result;
2270}
2271
2272EXPORT_SYMBOL(lock_may_read);
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2288{
2289 struct file_lock *fl;
2290 int result = 1;
2291 lock_flocks();
2292 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2293 if (IS_POSIX(fl)) {
2294 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2295 continue;
2296 } else if (IS_FLOCK(fl)) {
2297 if (!(fl->fl_type & LOCK_MAND))
2298 continue;
2299 if (fl->fl_type & LOCK_WRITE)
2300 continue;
2301 } else
2302 continue;
2303 result = 0;
2304 break;
2305 }
2306 unlock_flocks();
2307 return result;
2308}
2309
2310EXPORT_SYMBOL(lock_may_write);
2311
/*
 * Create the slab cache backing all struct file_lock allocations.
 * SLAB_PANIC means a failure here halts boot, so no error check needed.
 */
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	return 0;
}

core_initcall(filelock_init);
2321