1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129
130#include <asm/uaccess.h>
131
/* Classify a lock by its fl_flags bits. */
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
135
136static bool lease_breaking(struct file_lock *fl)
137{
138 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
139}
140
141static int target_leasetype(struct file_lock *fl)
142{
143 if (fl->fl_flags & FL_UNLOCK_PENDING)
144 return F_UNLCK;
145 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
146 return F_RDLCK;
147 return fl->fl_type;
148}
149
/*
 * Lease policy knobs: whether leases may be taken at all, and how many
 * seconds a lease holder gets to release before being timed out
 * (presumably sysctl-exported elsewhere — not visible in this file).
 */
int leases_enable = 1;
int lease_break_time = 45;

/* Walk every lock on @inode through the address of each fl_next link,
 * so the current entry can be unlinked in place. */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

/* All granted locks / all blocked POSIX waiters; both protected by
 * file_lock_lock (taken via lock_flocks()/unlock_flocks()). */
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static DEFINE_SPINLOCK(file_lock_lock);
159
160
161
162
/* Take the global spinlock protecting every file-lock list. */
void lock_flocks(void)
{
	spin_lock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(lock_flocks);
168
/* Release the global file-lock spinlock taken by lock_flocks(). */
void unlock_flocks(void)
{
	spin_unlock(&file_lock_lock);
}
EXPORT_SYMBOL_GPL(unlock_flocks);
174
175static struct kmem_cache *filelock_cache __read_mostly;
176
177static void locks_init_lock_heads(struct file_lock *fl)
178{
179 INIT_LIST_HEAD(&fl->fl_link);
180 INIT_LIST_HEAD(&fl->fl_block);
181 init_waitqueue_head(&fl->fl_wait);
182}
183
184
185struct file_lock *locks_alloc_lock(void)
186{
187 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
188
189 if (fl)
190 locks_init_lock_heads(fl);
191
192 return fl;
193}
194EXPORT_SYMBOL_GPL(locks_alloc_lock);
195
196void locks_release_private(struct file_lock *fl)
197{
198 if (fl->fl_ops) {
199 if (fl->fl_ops->fl_release_private)
200 fl->fl_ops->fl_release_private(fl);
201 fl->fl_ops = NULL;
202 }
203 fl->fl_lmops = NULL;
204
205}
206EXPORT_SYMBOL_GPL(locks_release_private);
207
208
/* Free @fl back to the slab cache; it must be unlinked and waiter-free. */
void locks_free_lock(struct file_lock *fl)
{
	/* Catch callers freeing a lock that is still live. */
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!list_empty(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
219
220void locks_init_lock(struct file_lock *fl)
221{
222 memset(fl, 0, sizeof(struct file_lock));
223 locks_init_lock_heads(fl);
224}
225
226EXPORT_SYMBOL(locks_init_lock);
227
228static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
229{
230 if (fl->fl_ops) {
231 if (fl->fl_ops->fl_copy_lock)
232 fl->fl_ops->fl_copy_lock(new, fl);
233 new->fl_ops = fl->fl_ops;
234 }
235 if (fl->fl_lmops)
236 new->fl_lmops = fl->fl_lmops;
237}
238
239
240
241
242void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
243{
244 new->fl_owner = fl->fl_owner;
245 new->fl_pid = fl->fl_pid;
246 new->fl_file = NULL;
247 new->fl_flags = fl->fl_flags;
248 new->fl_type = fl->fl_type;
249 new->fl_start = fl->fl_start;
250 new->fl_end = fl->fl_end;
251 new->fl_ops = NULL;
252 new->fl_lmops = NULL;
253}
254EXPORT_SYMBOL(__locks_copy_lock);
255
256void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
257{
258 locks_release_private(new);
259
260 __locks_copy_lock(new, fl);
261 new->fl_file = fl->fl_file;
262 new->fl_ops = fl->fl_ops;
263 new->fl_lmops = fl->fl_lmops;
264
265 locks_copy_private(new, fl);
266}
267
268EXPORT_SYMBOL(locks_copy_lock);
269
270static inline int flock_translate_cmd(int cmd) {
271 if (cmd & LOCK_MAND)
272 return cmd & (LOCK_MAND | LOCK_RW);
273 switch (cmd) {
274 case LOCK_SH:
275 return F_RDLCK;
276 case LOCK_EX:
277 return F_WRLCK;
278 case LOCK_UN:
279 return F_UNLCK;
280 }
281 return -EINVAL;
282}
283
284
285static int flock_make_lock(struct file *filp, struct file_lock **lock,
286 unsigned int cmd)
287{
288 struct file_lock *fl;
289 int type = flock_translate_cmd(cmd);
290 if (type < 0)
291 return type;
292
293 fl = locks_alloc_lock();
294 if (fl == NULL)
295 return -ENOMEM;
296
297 fl->fl_file = filp;
298 fl->fl_pid = current->tgid;
299 fl->fl_flags = FL_FLOCK;
300 fl->fl_type = type;
301 fl->fl_end = OFFSET_MAX;
302
303 *lock = fl;
304 return 0;
305}
306
307static int assign_type(struct file_lock *fl, long type)
308{
309 switch (type) {
310 case F_RDLCK:
311 case F_WRLCK:
312 case F_UNLCK:
313 fl->fl_type = type;
314 break;
315 default:
316 return -EINVAL;
317 }
318 return 0;
319}
320
321
322
323
/*
 * Convert a userspace struct flock into an FL_POSIX file_lock on @filp.
 * Resolves l_whence/l_start/l_len into absolute [fl_start, fl_end], with
 * l_len == 0 meaning "to end of file" and l_len < 0 meaning the range
 * *ending* just before l_start.  Returns 0, -EINVAL for a bad whence or
 * negative start, or -EOVERFLOW for an inverted range.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the effect of negative l_len undefined;
	 * POSIX-2001 defines it as the range ending at l_start - 1. */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		/* NOTE(review): start + l_len - 1 can overflow off_t for
		 * extreme inputs — confirm callers bound l_len. */
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
372
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of flock_to_posix_lock() for 32-bit kernels: same range
 * resolution (l_len == 0 → EOF, l_len < 0 → range ending at l_start - 1)
 * but computed in loff_t from a struct flock64.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(filp->f_path.dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
419
420
/* lm_break callback for leases: signal the holder that a break started. */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
425
/* Lock-manager ops shared by every kernel-internal lease. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
};
430
431
432
433
434static int lease_init(struct file *filp, long type, struct file_lock *fl)
435 {
436 if (assign_type(fl, type) != 0)
437 return -EINVAL;
438
439 fl->fl_owner = current->files;
440 fl->fl_pid = current->tgid;
441
442 fl->fl_file = filp;
443 fl->fl_flags = FL_LEASE;
444 fl->fl_start = 0;
445 fl->fl_end = OFFSET_MAX;
446 fl->fl_ops = NULL;
447 fl->fl_lmops = &lease_manager_ops;
448 return 0;
449}
450
451
452static struct file_lock *lease_alloc(struct file *filp, long type)
453{
454 struct file_lock *fl = locks_alloc_lock();
455 int error = -ENOMEM;
456
457 if (fl == NULL)
458 return ERR_PTR(error);
459
460 error = lease_init(filp, type, fl);
461 if (error) {
462 locks_free_lock(fl);
463 return ERR_PTR(error);
464 }
465 return fl;
466}
467
468
469
470static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
471{
472 return ((fl1->fl_end >= fl2->fl_start) &&
473 (fl2->fl_end >= fl1->fl_start));
474}
475
476
477
478
479static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
480{
481 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
482 return fl2->fl_lmops == fl1->fl_lmops &&
483 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
484 return fl1->fl_owner == fl2->fl_owner;
485}
486
487
488
489
490static void __locks_delete_block(struct file_lock *waiter)
491{
492 list_del_init(&waiter->fl_block);
493 list_del_init(&waiter->fl_link);
494 waiter->fl_next = NULL;
495}
496
497
498
/* Locked wrapper around __locks_delete_block(). */
void locks_delete_block(struct file_lock *waiter)
{
	lock_flocks();
	__locks_delete_block(waiter);
	unlock_flocks();
}
EXPORT_SYMBOL(locks_delete_block);
506
507
508
509
510
511
/*
 * Queue @waiter behind @blocker.  POSIX waiters additionally go on the
 * global blocked_list, which posix_locks_deadlock() walks.
 * Caller must hold the file-lock spinlock.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	/* A waiter may only block on one lock at a time. */
	BUG_ON(!list_empty(&waiter->fl_block));
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	if (IS_POSIX(blocker))
		list_add(&waiter->fl_link, &blocked_list);
}
521
522
523
524
525
526static void locks_wake_up_blocks(struct file_lock *blocker)
527{
528 while (!list_empty(&blocker->fl_block)) {
529 struct file_lock *waiter;
530
531 waiter = list_first_entry(&blocker->fl_block,
532 struct file_lock, fl_block);
533 __locks_delete_block(waiter);
534 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
535 waiter->fl_lmops->lm_notify(waiter);
536 else
537 wake_up(&waiter->fl_wait);
538 }
539}
540
541
542
543
/*
 * Link a granted lock into the inode's list at position @pos and onto the
 * global list, recording the owner's pid (namespace-aware) in fl_nspid.
 * Caller must hold the file-lock spinlock.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	fl->fl_nspid = get_pid(task_tgid(current));

	/* Splice into the inode's singly linked i_flock chain. */
	fl->fl_next = *pos;
	*pos = fl;
}
554
555
556
557
558
559
560
/*
 * Unlink the lock at *@thisfl_p from the inode chain and global list,
 * wake anything blocked on it, and free it.
 * Caller must hold the file-lock spinlock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	/* Drop the pid reference taken at insertion time. */
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
577
578
579
580
581static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
582{
583 if (sys_fl->fl_type == F_WRLCK)
584 return 1;
585 if (caller_fl->fl_type == F_WRLCK)
586 return 1;
587 return 0;
588}
589
590
591
592
/*
 * Would existing lock @sys_fl block POSIX request @caller_fl?
 * A process never conflicts with its own POSIX locks.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;
	return locks_conflict(caller_fl, sys_fl);
}
607
608
609
610
611static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
612{
613
614
615
616 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
617 return (0);
618 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
619 return 0;
620
621 return (locks_conflict(caller_fl, sys_fl));
622}
623
/*
 * Test whether @fl could be granted on @filp.  On conflict, *@fl is
 * overwritten with the details of the first conflicting lock (pid
 * translated into the caller's namespace when recorded); otherwise
 * fl_type is set to F_UNLCK.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_flocks();
	for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	unlock_flocks();
	return;
}
EXPORT_SYMBOL(posix_test_lock);
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
/* Cap on deadlock-chain traversal; beyond this we give up and allow. */
#define MAX_DEADLK_ITERATIONS 10

/* Find the lock that the owner of @block_fl is itself blocked on, if any. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	list_for_each_entry(fl, &blocked_list, fl_link) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
685
/*
 * Would blocking @caller_fl on @block_fl create a wait cycle?  Follows
 * the blocked-on chain up to MAX_DEADLK_ITERATIONS hops; if the chain is
 * longer than that, deadlock is NOT reported (returns 0) — a bounded,
 * best-effort check.  Caller must hold the file-lock spinlock.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
699
700
701
702
703
704
705
706
/*
 * Apply an FL_FLOCK request to @filp's inode.  A process may hold only
 * one flock lock per open file: an existing lock of a different type is
 * removed first (so upgrades/downgrades are not atomic).  Returns 0,
 * -EAGAIN, -ENOENT (FL_EXISTS unlock of a missing lock), -ENOMEM, or
 * FILE_LOCK_DEFERRED when the request was queued behind a blocker.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_path.dentry->d_inode;
	int error = 0;
	int found = 0;

	/* Pre-allocate outside the spinlock; not needed for test/unlock. */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	lock_flocks();
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* Remove any existing flock lock this file already holds. */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* already hold the requested type */
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If we dropped a lock, give waiters it woke a chance to run
	 * before we take the new one (upgrade is deliberately non-atomic).
	 */
	if (found) {
		unlock_flocks();
		cond_resched();
		lock_flocks();
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	/* FL_ACCESS was only probing for conflicts; don't insert. */
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;
	error = 0;

out:
	unlock_flocks();
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
785
/*
 * Core POSIX lock engine: apply @request to @inode, merging with /
 * splitting the caller's existing locks as required, and optionally
 * reporting the first conflicting lock through *@conflock.  Returns 0,
 * -EAGAIN, -EDEADLK, -ENOLCK, -ENOENT (FL_EXISTS unlock of nothing),
 * or FILE_LOCK_DEFERRED when blocked with FL_SLEEP.
 */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need up to two new locks (one insertion plus one split),
	 * so allocate them up front, outside the spinlock.  A whole-file
	 * unlock and an FL_ACCESS probe can never need an allocation.
	 * Allocation failure is only reported later, and only if a new
	 * lock actually turns out to be needed.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	lock_flocks();
	if (request->fl_type != F_UNLCK) {
		/* Check for conflicts with other owners' locks first. */
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = FILE_LOCK_DEFERRED;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* An FL_ACCESS probe succeeds once no conflict was found. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * POSIX locks are kept sorted with all of one owner's locks
	 * adjacent; skip ahead to this owner's first lock.
	 */
	before = &inode->i_flock;

	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Walk this owner's locks, merging/splitting against the request. */
	while ((fl = *before) && posix_same_owner(request, fl)) {

		if (request->fl_type == fl->fl_type) {
			/*
			 * Same type: coalesce adjacent or overlapping
			 * ranges into one lock.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;

			/* Past the request: nothing further can touch it. */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* Grow the existing lock to cover the union. */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			/* Reuse this lock as the in-place result. */
			request = fl;
			added = 1;
		}
		else {
			/*
			 * Different type (or unlock): carve the request's
			 * range out of any overlapping lock.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;	/* piece surviving on the left */

			/* Piece surviving on the right; may equal 'left'
			 * when one lock straddles the whole request. */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* Fully covered: delete or replace it. */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/*
				 * Replace in place rather than delete+
				 * insert, but wake waiters first: the new
				 * type may no longer block them.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = 1;
			}
		}

		/* Advance past a lock that didn't interact. */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * A single straddling lock must be split in two, which needs the
	 * second pre-allocated lock; without it we must fail rather than
	 * leave the owner's locks inconsistent.
	 */
	error = -ENOLCK;
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* One lock straddled the request: clone it so the
			 * left and right remnants are separate locks. */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	unlock_flocks();

	/* Free whichever pre-allocated locks went unused. */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
/*
 * Apply POSIX lock @fl to @filp; on conflict, details of the blocking
 * lock are copied into *@conflock (if non-NULL).  Thin wrapper around
 * __posix_lock_file() on the file's inode.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
/*
 * Blocking variant of posix_lock_file(): retry whenever the deferred
 * request is woken (fl_next cleared), until granted, refused, or
 * interrupted by a signal (in which case the block entry is removed
 * and -ERESTARTSYS is returned).
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Woken when the blocker goes away and fl_next is cleared. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Signal: unhook ourselves from the blocker and bail out. */
		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1036
1037
1038
1039
1040
1041
1042
1043
/*
 * Mandatory-locking check used before mapping a file shared-writable:
 * returns -EAGAIN if any other owner holds a POSIX lock on @inode,
 * 0 otherwise.  The scan stops at the first POSIX lock not owned by
 * the caller (fl then non-NULL).
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_flocks();
	return fl ? -EAGAIN : 0;
}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
/*
 * Mandatory-locking check for an I/O region: build an FL_ACCESS probe
 * over [@offset, @offset + @count) and test it against @inode's POSIX
 * locks.  Blocks (unless the file is O_NONBLOCK) until the region is
 * free, the lock goes away, or a signal arrives.  Returns 0, -EAGAIN,
 * or -ERESTARTSYS.
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * Only retry if the inode still has mandatory
			 * locking enabled; its mode may have changed
			 * while we slept.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		/* Signal or mode change: stop waiting on this blocker. */
		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1116
/*
 * Clear the pending-break flags satisfied by moving the lease to @arg.
 * An unlock satisfies a pending downgrade too, hence the fallthrough.
 */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1127
1128
/*
 * lm_change implementation for leases: retarget the lease at *@before
 * to type @arg, wake waiters, and on full unlock tear down the fasync
 * notification state and remove the lease entirely.
 * Caller must hold the file-lock spinlock.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		/* Stop SIGIO delivery for the departing lease holder. */
		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock(before);
	}
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1154
1155static bool past_time(unsigned long then)
1156{
1157 if (!then)
1158
1159 return false;
1160 return time_after(jiffies, then);
1161}
1162
/*
 * Force-complete any lease breaks on @inode whose grace period expired:
 * past downgrade deadlines become F_RDLCK, past break deadlines F_UNLCK
 * (which removes the lease).  Caller must hold the file-lock spinlock.
 */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		/* Only advance if lease_modify() didn't unlink *before. */
		if (fl == *before)
			before = &fl->fl_next;
	}
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
/*
 * Called when an open (or truncate) of @inode conflicts with existing
 * leases.  @mode's access bits decide whether leases must be fully
 * broken (write access) or merely downgraded to read leases.  Notifies
 * the holders, then blocks up to lease_break_time seconds for them to
 * comply, unless the caller itself holds a lease or passed O_NONBLOCK
 * (then -EWOULDBLOCK).  Returns 0 or a -errno.
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	unsigned long break_time;
	int i_have_this_lease = 0;
	int want_write = (mode & O_ACCMODE) != O_RDONLY;

	/* Dummy lock used only to express the opener's conflicting access. */
	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
	if (IS_ERR(new_fl))
		return PTR_ERR(new_fl);

	lock_flocks();

	time_out_leases(inode);

	/* Leases sort to the front of i_flock; none means nothing to do. */
	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	if (!locks_conflict(flock, new_fl))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		/* 0 is reserved for "no deadline"; nudge past it. */
		if (break_time == 0)
			break_time++;
	}

	/* Mark every lease as breaking and notify its holder. */
	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (want_write) {
			if (fl->fl_flags & FL_UNLOCK_PENDING)
				continue;
			fl->fl_flags |= FL_UNLOCK_PENDING;
			fl->fl_break_time = break_time;
		} else {
			/* NOTE(review): this tests the list head 'flock',
			 * not the current 'fl' — looks like it should be
			 * lease_breaking(fl); confirm against upstream. */
			if (lease_breaking(flock))
				continue;
			fl->fl_flags |= FL_DOWNGRADE_PENDING;
			fl->fl_downgrade_time = break_time;
		}
		fl->fl_lmops->lm_break(fl);
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	/* Wait (with timeout) for the front lease to be released. */
	locks_insert_block(flock, new_fl);
	unlock_flocks();
	error = wait_event_interruptible_timeout(new_fl->fl_wait,
						!new_fl->fl_next, break_time);
	lock_flocks();
	__locks_delete_block(new_fl);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);

		/* Re-check: other leases may still conflict; if so, wait
		 * again from the (possibly changed) front of the list. */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (locks_conflict(new_fl, flock))
				goto restart;
		}
		error = 0;
	}

out:
	unlock_flocks();
	locks_free_lock(new_fl);
	return error;
}

EXPORT_SYMBOL(__break_lease);
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289void lease_get_mtime(struct inode *inode, struct timespec *time)
1290{
1291 struct file_lock *flock = inode->i_flock;
1292 if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
1293 *time = current_fs_time(inode->i_sb);
1294 else
1295 *time = inode->i_mtime;
1296}
1297
1298EXPORT_SYMBOL(lease_get_mtime);
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323int fcntl_getlease(struct file *filp)
1324{
1325 struct file_lock *fl;
1326 int type = F_UNLCK;
1327
1328 lock_flocks();
1329 time_out_leases(filp->f_path.dentry->d_inode);
1330 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1331 fl = fl->fl_next) {
1332 if (fl->fl_file == filp) {
1333 type = target_leasetype(fl);
1334 break;
1335 }
1336 }
1337 unlock_flocks();
1338 return type;
1339}
1340
/*
 * Install or retarget lease *@flp of type @arg on @filp.  Fails with
 * -EAGAIN when the file is otherwise open in a conflicting way (writers
 * for a read lease; any other reference for a write lease), when another
 * file holds a conflicting lease, or when one is mid-unlock; -EINVAL
 * when leases are administratively disabled.  If this file already has
 * a lease it is modified in place and *@flp updated to point at it.
 * Caller must hold the file-lock spinlock.
 */
int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	lease = *flp;

	/* Refuse if the inode's other users already conflict. */
	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	if ((arg == F_WRLCK)
	    && ((dentry->d_count > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * Scan existing leases (they sort to the front of i_flock),
	 * remembering our own entry if present.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}

		/* A write lease cannot coexist with anyone else's lease. */
		if (arg == F_WRLCK)
			goto out;

		/* Don't grant alongside a lease that is being unlocked. */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	/* Already hold one on this file: change it rather than add. */
	if (my_before != NULL) {
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
1405
/*
 * Remove @filp's lease (if any) via the lock manager's lm_change with
 * F_UNLCK.  Returns -EAGAIN when this file holds no lease.
 * Caller must hold the file-lock spinlock.
 */
int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
/*
 * Default ->setlease implementation: permission checks (file owner or
 * CAP_LEASE, regular file, LSM approval), then dispatch to add/delete.
 * Returns -EACCES, -EINVAL, or the add/delete result.
 * Caller must hold the file-lock spinlock.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	/* Complete any expired breaks before judging the new request. */
	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
1462
1463static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1464{
1465 if (filp->f_op && filp->f_op->setlease)
1466 return filp->f_op->setlease(filp, arg, lease);
1467 else
1468 return generic_setlease(filp, arg, lease);
1469}
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
/*
 * Public entry point for setting/clearing a lease on @filp: takes the
 * file-lock spinlock around __vfs_setlease().
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	int error;

	lock_flocks();
	error = __vfs_setlease(filp, arg, lease);
	unlock_flocks();

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1509
/* F_SETLEASE(F_UNLCK): build an on-stack unlock lease and apply it. */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1518
/*
 * F_SETLEASE(F_RDLCK/F_WRLCK): allocate a lease and a fasync entry up
 * front, install the lease, then wire up SIGIO delivery to fd @fd.
 * The filesystem may substitute its own lock (ret != fl), in which case
 * ours is freed; the fasync entry is attached to whichever lock won.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	/* Allocate outside the spinlock; freed below if unused. */
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	lock_flocks();
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		unlock_flocks();
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* The fs reused an existing lease; our allocation is redundant. */
	if (ret != fl)
		locks_free_lock(fl);

	/* Attach the pre-allocated fasync entry so the holder gets SIGIO
	 * on break; if one already existed, 'new' stays ours to free. */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	unlock_flocks();

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1574{
1575 if (arg == F_UNLCK)
1576 return do_fcntl_delete_lease(filp);
1577 return do_fcntl_add_lease(fd, filp, arg);
1578}
1579
1580
1581
1582
1583
1584
1585
1586
/*
 * Blocking variant of flock_lock_file(): retry whenever the deferred
 * request is woken, until granted, refused, or interrupted (then the
 * block entry is removed and the signal error returned).
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* Woken when the blocker goes away and fl_next is cleared. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Signal: unhook ourselves from the blocker and bail out. */
		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
/*
 * flock(2): apply or remove an advisory whole-file lock on @fd.
 * LOCK_NB makes the attempt non-blocking; locking an fd without
 * read/write mode (unless LOCK_MAND or unlocking) yields -EBADF.
 * Filesystems may supply their own ->flock; otherwise the generic
 * flock_lock_file_wait() path is used.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	error = flock_make_lock(f.file, &lock, cmd);
	if (error)
		goto out_putf;
	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op && f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = flock_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679int vfs_test_lock(struct file *filp, struct file_lock *fl)
1680{
1681 if (filp->f_op && filp->f_op->lock)
1682 return filp->f_op->lock(filp, F_GETLK, fl);
1683 posix_test_lock(filp, fl);
1684 return 0;
1685}
1686EXPORT_SYMBOL_GPL(vfs_test_lock);
1687
/*
 * Convert an internal file_lock back to a userspace struct flock.
 * fl_end == OFFSET_MAX encodes as l_len == 0 ("to EOF"); on 32-bit,
 * ranges that don't fit in off_t yield -EOVERFLOW.
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
	/* Ranges beyond what off_t can express must be reported as such. */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
1708
#if BITS_PER_LONG == 32
/* 64-bit counterpart of posix_lock_to_flock(); cannot overflow. */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	/* l_len == 0 is the "to end of file" encoding. */
	if (fl->fl_end == OFFSET_MAX)
		flock->l_len = 0;
	else
		flock->l_len = fl->fl_end - fl->fl_start + 1;
}
#endif
1720
1721
1722
1723
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	/* fl_type is F_UNLCK when no conflicting lock was found. */
	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		/* May fail with -EOVERFLOW on 32-bit kernels. */
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto out;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1792{
1793 if (filp->f_op && filp->f_op->lock)
1794 return filp->f_op->lock(filp, cmd, fl);
1795 else
1796 return posix_lock_file(filp, fl, conf);
1797}
1798EXPORT_SYMBOL_GPL(vfs_lock_file);
1799
/*
 * Apply a lock via vfs_lock_file(), sleeping until it is granted when
 * the filesystem defers the request (FILE_LOCK_DEFERRED), and taking
 * the request off the blocked list if the wait is interrupted.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared once the blocked request is granted. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;
		/* Interrupted by a signal: unlink from the blocked list. */
		locks_delete_block(fl);
		break;
	}

	return error;
}
1823
1824
1825
1826
1827int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1828 struct flock __user *l)
1829{
1830 struct file_lock *file_lock = locks_alloc_lock();
1831 struct flock flock;
1832 struct inode *inode;
1833 struct file *f;
1834 int error;
1835
1836 if (file_lock == NULL)
1837 return -ENOLCK;
1838
1839
1840
1841
1842 error = -EFAULT;
1843 if (copy_from_user(&flock, l, sizeof(flock)))
1844 goto out;
1845
1846 inode = filp->f_path.dentry->d_inode;
1847
1848
1849
1850
1851 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1852 error = -EAGAIN;
1853 goto out;
1854 }
1855
1856again:
1857 error = flock_to_posix_lock(filp, file_lock, &flock);
1858 if (error)
1859 goto out;
1860 if (cmd == F_SETLKW) {
1861 file_lock->fl_flags |= FL_SLEEP;
1862 }
1863
1864 error = -EBADF;
1865 switch (flock.l_type) {
1866 case F_RDLCK:
1867 if (!(filp->f_mode & FMODE_READ))
1868 goto out;
1869 break;
1870 case F_WRLCK:
1871 if (!(filp->f_mode & FMODE_WRITE))
1872 goto out;
1873 break;
1874 case F_UNLCK:
1875 break;
1876 default:
1877 error = -EINVAL;
1878 goto out;
1879 }
1880
1881 error = do_lock_file_wait(filp, cmd, file_lock);
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892 spin_lock(¤t->files->file_lock);
1893 f = fcheck(fd);
1894 spin_unlock(¤t->files->file_lock);
1895 if (!error && f != filp && flock.l_type != F_UNLCK) {
1896 flock.l_type = F_UNLCK;
1897 goto again;
1898 }
1899
1900out:
1901 locks_free_lock(file_lock);
1902 return error;
1903}
1904
1905#if BITS_PER_LONG == 32
1906
1907
1908
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK64 command of fcntl() on 32-bit kernels.
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	/* fl_type is F_UNLCK when no conflicting lock was found. */
	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out:
	return error;
}
1941
1942
1943
1944
1945int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1946 struct flock64 __user *l)
1947{
1948 struct file_lock *file_lock = locks_alloc_lock();
1949 struct flock64 flock;
1950 struct inode *inode;
1951 struct file *f;
1952 int error;
1953
1954 if (file_lock == NULL)
1955 return -ENOLCK;
1956
1957
1958
1959
1960 error = -EFAULT;
1961 if (copy_from_user(&flock, l, sizeof(flock)))
1962 goto out;
1963
1964 inode = filp->f_path.dentry->d_inode;
1965
1966
1967
1968
1969 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1970 error = -EAGAIN;
1971 goto out;
1972 }
1973
1974again:
1975 error = flock64_to_posix_lock(filp, file_lock, &flock);
1976 if (error)
1977 goto out;
1978 if (cmd == F_SETLKW64) {
1979 file_lock->fl_flags |= FL_SLEEP;
1980 }
1981
1982 error = -EBADF;
1983 switch (flock.l_type) {
1984 case F_RDLCK:
1985 if (!(filp->f_mode & FMODE_READ))
1986 goto out;
1987 break;
1988 case F_WRLCK:
1989 if (!(filp->f_mode & FMODE_WRITE))
1990 goto out;
1991 break;
1992 case F_UNLCK:
1993 break;
1994 default:
1995 error = -EINVAL;
1996 goto out;
1997 }
1998
1999 error = do_lock_file_wait(filp, cmd, file_lock);
2000
2001
2002
2003
2004
2005 spin_lock(¤t->files->file_lock);
2006 f = fcheck(fd);
2007 spin_unlock(¤t->files->file_lock);
2008 if (!error && f != filp && flock.l_type != F_UNLCK) {
2009 flock.l_type = F_UNLCK;
2010 goto again;
2011 }
2012
2013out:
2014 locks_free_lock(file_lock);
2015 return error;
2016}
2017#endif
2018
2019
2020
2021
2022
2023
/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on
	 * this file at the same time, but we wouldn't remove that lock
	 * anyway.
	 */
	if (!filp->f_path.dentry->d_inode->i_flock)
		return;

	/* Build an on-stack unlock request covering the whole file. */
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* Give fl_ops a chance to free any private state it attached. */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
}

EXPORT_SYMBOL(locks_remove_posix);
2053
2054
2055
2056
/*
 * This function is called on the last close of an open file: any
 * remaining FL_FLOCK locks and leases held through this struct file
 * are removed from the inode's lock list.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_path.dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	/* Let a filesystem that implements ->flock release its own
	 * flock state first via an F_UNLCK request. */
	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	lock_flocks();
	before = &inode->i_flock;

	/* Walk the singly-linked lock list, deleting every flock lock
	 * and unwinding every lease still attached to this file. */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* Any other lock type here is a bug. */
			BUG();
		}
		before = &fl->fl_next;
	}
	unlock_flocks();
}
2099
2100
2101
2102
2103
2104
2105
2106
2107int
2108posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2109{
2110 int status = 0;
2111
2112 lock_flocks();
2113 if (waiter->fl_next)
2114 __locks_delete_block(waiter);
2115 else
2116 status = -ENOENT;
2117 unlock_flocks();
2118 return status;
2119}
2120
2121EXPORT_SYMBOL(posix_unblock_lock);
2122
2123
2124
2125
2126
2127
2128
2129
2130int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2131{
2132 if (filp->f_op && filp->f_op->lock)
2133 return filp->f_op->lock(filp, F_CANCELLK, fl);
2134 return 0;
2135}
2136
2137EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2138
2139#ifdef CONFIG_PROC_FS
2140#include <linux/proc_fs.h>
2141#include <linux/seq_file.h>
2142
/*
 * Emit one lock as a line of /proc/locks output.  @id is the ordinal
 * printed at the start of the line, @pfx is "" for a held lock or
 * " ->" for a waiter blocked on the preceding lock.  The exact field
 * layout is userspace ABI — do not change the format strings.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* Show the pid as seen from the holder's pid namespace when
	 * one was recorded on the lock. */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_path.dentry->d_inode;

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK MSNFS ");
		} else {
			seq_printf(f, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE ");
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE ");
		else
			seq_printf(f, "BREAKER ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2213
/* seq_file ->show: print one held lock plus every waiter blocked on it. */
static int locks_show(struct seq_file *f, void *v)
{
	struct file_lock *fl, *bfl;

	fl = list_entry(v, struct file_lock, fl_link);

	lock_get_status(f, fl, *((loff_t *)f->private), "");

	/* Waiters queued on fl_block are shown prefixed with " ->". */
	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, *((loff_t *)f->private), " ->");

	return 0;
}
2227
/* seq_file ->start: take the global file_lock_lock for the duration of
 * the traversal and store the 1-based ordinal in seq private data for
 * lock_get_status() to print. */
static void *locks_start(struct seq_file *f, loff_t *pos)
{
	loff_t *p = f->private;

	lock_flocks();
	*p = (*pos + 1);
	return seq_list_start(&file_lock_list, *pos);
}
2236
/* seq_file ->next: advance both the ordinal and the list cursor. */
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	loff_t *p = f->private;
	++*p;
	return seq_list_next(v, &file_lock_list, pos);
}
2243
/* seq_file ->stop: drop the lock taken in locks_start(). */
static void locks_stop(struct seq_file *f, void *v)
{
	unlock_flocks();
}
2248
/* Iterator callbacks for the /proc/locks seq_file. */
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2255
/* Open /proc/locks; the loff_t private area holds the display ordinal. */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
}
2260
/* File operations backing the /proc/locks entry. */
static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2267
/* Register /proc/locks at boot (return value of proc_create is
 * intentionally ignored; the entry is best-effort). */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
2274#endif
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
/**
 *	lock_may_read - checks that the region is free of locks
 *	@inode: the inode that is being read
 *	@start: the first byte to read
 *	@len: the number of bytes to read
 *
 *	Returns 1 unless an overlapping non-read POSIX lock exists, or a
 *	mandatory (LOCK_MAND) flock without LOCK_READ is held on the inode.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			/* Read locks never conflict with a read. */
			if (fl->fl_type == F_RDLCK)
				continue;
			/* NOTE(review): this overlap test uses start + len,
			 * so a lock starting exactly one byte past the region
			 * is still treated as a conflict — presumably
			 * deliberate conservatism; confirm before changing. */
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			/* Only mandatory (share-mode) flocks matter here. */
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_read);
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
/**
 *	lock_may_write - checks that the region is free of locks
 *	@inode: the inode that is being written
 *	@start: the first byte to write
 *	@len: the number of bytes to write
 *
 *	Returns 1 unless any overlapping POSIX lock exists, or a mandatory
 *	(LOCK_MAND) flock without LOCK_WRITE is held on the inode.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_flocks();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			/* NOTE(review): as in lock_may_read(), the test uses
			 * start + len rather than start + len - 1, which is
			 * conservative by one byte; confirm intent. */
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			/* Only mandatory (share-mode) flocks matter here. */
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_flocks();
	return result;
}

EXPORT_SYMBOL(lock_may_write);
2353
/* Create the slab cache used for all struct file_lock allocations;
 * SLAB_PANIC means boot fails hard if the cache cannot be created. */
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	return 0;
}

core_initcall(filelock_init);
2363