1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/module.h>
123#include <linux/security.h>
124#include <linux/slab.h>
125#include <linux/syscalls.h>
126#include <linux/time.h>
127#include <linux/rcupdate.h>
128#include <linux/pid_namespace.h>
129#include <linux/hashtable.h>
130#include <linux/percpu.h>
131#include <linux/lglock.h>
132
133#include <asm/uaccess.h>
134
/*
 * Predicates on fl_flags: classify a lock as a POSIX (fcntl) byte-range
 * lock, a BSD flock() lock, or a lease.
 */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
138
139static bool lease_breaking(struct file_lock *fl)
140{
141 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
142}
143
144static int target_leasetype(struct file_lock *fl)
145{
146 if (fl->fl_flags & FL_UNLOCK_PENDING)
147 return F_UNLCK;
148 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
149 return F_RDLCK;
150 return fl->fl_type;
151}
152
/*
 * Tunables: whether leases may be taken at all, and how many seconds a
 * lease holder is given to respond to a lease break (used as the basis
 * for fl_break_time / fl_downgrade_time below).
 */
int leases_enable = 1;
int lease_break_time = 45;
155
/*
 * Walk every lock on an inode. lockp is a struct file_lock ** cursor so
 * that the current entry can be deleted in place (see locks_delete_lock()).
 * All users below call this while holding inode->i_lock.
 */
#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
158
159
160
161
162
163
164
/*
 * Global list of all active locks, kept per-CPU and protected by the
 * per-CPU lglock so that insertion/removal stays local to one CPU
 * (fl_link_cpu, set in locks_insert_global_locks(), records which CPU
 * list an entry went onto). Presumably consumed when iterating all locks
 * system-wide (e.g. /proc/locks) -- the reader is not in this chunk.
 */
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
167
168
169
170
171
172
173
174
175
176
177
178
/*
 * Hash of blocked POSIX waiters, keyed by lock owner (posix_owner_key()).
 * Used by the deadlock detector to find what a given owner is waiting
 * for. Protected by blocked_lock_lock.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
/* Protects blocked_hash and the fl_block lists of blocked waiters. */
static DEFINE_SPINLOCK(blocked_lock_lock);

/* Slab cache for struct file_lock allocations. */
static struct kmem_cache *filelock_cache __read_mostly;
202
203static void locks_init_lock_heads(struct file_lock *fl)
204{
205 INIT_HLIST_NODE(&fl->fl_link);
206 INIT_LIST_HEAD(&fl->fl_block);
207 init_waitqueue_head(&fl->fl_wait);
208}
209
210
211struct file_lock *locks_alloc_lock(void)
212{
213 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
214
215 if (fl)
216 locks_init_lock_heads(fl);
217
218 return fl;
219}
220EXPORT_SYMBOL_GPL(locks_alloc_lock);
221
/*
 * Drop filesystem-private state attached to a lock: give fl_ops a chance
 * to free its data, then clear both ops pointers so the lock can safely
 * be reused or freed.
 */
void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}
	fl->fl_lmops = NULL;

}
EXPORT_SYMBOL_GPL(locks_release_private);
233
234
/*
 * Free a lock. It must be fully unlinked: no sleeping waiters, not on
 * any blocker's fl_block list, and not hashed on the global list.
 */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
245
246void locks_init_lock(struct file_lock *fl)
247{
248 memset(fl, 0, sizeof(struct file_lock));
249 locks_init_lock_heads(fl);
250}
251
252EXPORT_SYMBOL(locks_init_lock);
253
/*
 * Copy the private (fs/lock-manager) state from fl to new. fl_ops get a
 * chance to duplicate their data via fl_copy_lock; the pointers are only
 * propagated when the source actually has them, leaving new's fields
 * untouched otherwise.
 */
static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
		new->fl_ops = fl->fl_ops;
	}
	if (fl->fl_lmops)
		new->fl_lmops = fl->fl_lmops;
}
264
265
266
267
268void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
269{
270 new->fl_owner = fl->fl_owner;
271 new->fl_pid = fl->fl_pid;
272 new->fl_file = NULL;
273 new->fl_flags = fl->fl_flags;
274 new->fl_type = fl->fl_type;
275 new->fl_start = fl->fl_start;
276 new->fl_end = fl->fl_end;
277 new->fl_ops = NULL;
278 new->fl_lmops = NULL;
279}
280EXPORT_SYMBOL(__locks_copy_lock);
281
/*
 * Full copy, including fl_file, the ops pointers and private state.
 * The target's existing private state is released first so it cannot
 * leak, then locks_copy_private() lets fl_ops duplicate the source's
 * private data.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must not leak private state it may already carry */
	locks_release_private(new);

	__locks_copy_lock(new, fl);
	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;
	new->fl_lmops = fl->fl_lmops;

	locks_copy_private(new, fl);
}

EXPORT_SYMBOL(locks_copy_lock);
295
/*
 * Map a flock(2) command to an internal lock type. Mandatory-share
 * requests (LOCK_MAND) pass through with their access bits; the classic
 * commands map to F_RDLCK/F_WRLCK/F_UNLCK. Anything else is -EINVAL.
 */
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	if (cmd == LOCK_SH)
		return F_RDLCK;
	if (cmd == LOCK_EX)
		return F_WRLCK;
	if (cmd == LOCK_UN)
		return F_UNLCK;
	return -EINVAL;
}
309
310
311static int flock_make_lock(struct file *filp, struct file_lock **lock,
312 unsigned int cmd)
313{
314 struct file_lock *fl;
315 int type = flock_translate_cmd(cmd);
316 if (type < 0)
317 return type;
318
319 fl = locks_alloc_lock();
320 if (fl == NULL)
321 return -ENOMEM;
322
323 fl->fl_file = filp;
324 fl->fl_pid = current->tgid;
325 fl->fl_flags = FL_FLOCK;
326 fl->fl_type = type;
327 fl->fl_end = OFFSET_MAX;
328
329 *lock = fl;
330 return 0;
331}
332
333static int assign_type(struct file_lock *fl, long type)
334{
335 switch (type) {
336 case F_RDLCK:
337 case F_WRLCK:
338 case F_UNLCK:
339 fl->fl_type = type;
340 break;
341 default:
342 return -EINVAL;
343 }
344 return 0;
345}
346
347
348
349
/*
 * Translate a userspace struct flock (fcntl F_GETLK/F_SETLK) into a
 * kernel FL_POSIX lock on filp, owned by the current process.
 * Returns 0, -EINVAL for a bad l_whence or a range starting before 0,
 * -EOVERFLOW when the computed range is inverted.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}

	/*
	 * l_start is relative to l_whence; l_len > 0 extends forward,
	 * l_len < 0 locks the l_len bytes *before* start (POSIX-2001).
	 * NOTE(review): "start += l->l_start" and "start + l->l_len - 1"
	 * can overflow off_t for extreme userspace values (signed overflow
	 * is UB); later upstream adds explicit -EOVERFLOW checks — verify.
	 */
	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;	/* default: to end of file */
	if (l->l_len > 0) {
		end = start + l->l_len - 1;
		fl->fl_end = end;
	} else if (l->l_len < 0) {
		end = start - 1;
		fl->fl_end = end;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
398
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of flock_to_posix_lock() for 32-bit kernels
 * (fcntl64 F_GETLK64/F_SETLK64): same semantics, loff_t arithmetic.
 * NOTE(review): the same potential signed-overflow concern as in
 * flock_to_posix_lock() applies here — verify against later upstream.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case SEEK_SET:
		start = 0;
		break;
	case SEEK_CUR:
		start = filp->f_pos;
		break;
	case SEEK_END:
		start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}

	start += l->l_start;
	if (start < 0)
		return -EINVAL;
	fl->fl_end = OFFSET_MAX;	/* default: to end of file */
	if (l->l_len > 0) {
		fl->fl_end = start + l->l_len - 1;
	} else if (l->l_len < 0) {
		/* negative l_len locks the bytes before start */
		fl->fl_end = start - 1;
		start += l->l_len;
		if (start < 0)
			return -EINVAL;
	}
	fl->fl_start = start;	/* we record the absolute position */
	if (fl->fl_end < fl->fl_start)
		return -EOVERFLOW;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
#endif
445
446
/* Default lm_break: notify the lease holder with SIGIO/POLL_MSG. */
static void lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
451
/* Lock-manager ops used for ordinary (fcntl F_SETLEASE) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
};
456
457
458
459
460static int lease_init(struct file *filp, long type, struct file_lock *fl)
461 {
462 if (assign_type(fl, type) != 0)
463 return -EINVAL;
464
465 fl->fl_owner = current->files;
466 fl->fl_pid = current->tgid;
467
468 fl->fl_file = filp;
469 fl->fl_flags = FL_LEASE;
470 fl->fl_start = 0;
471 fl->fl_end = OFFSET_MAX;
472 fl->fl_ops = NULL;
473 fl->fl_lmops = &lease_manager_ops;
474 return 0;
475}
476
477
478static struct file_lock *lease_alloc(struct file *filp, long type)
479{
480 struct file_lock *fl = locks_alloc_lock();
481 int error = -ENOMEM;
482
483 if (fl == NULL)
484 return ERR_PTR(error);
485
486 error = lease_init(filp, type, fl);
487 if (error) {
488 locks_free_lock(fl);
489 return ERR_PTR(error);
490 }
491 return fl;
492}
493
494
495
496static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
497{
498 return ((fl1->fl_end >= fl2->fl_start) &&
499 (fl2->fl_end >= fl1->fl_start));
500}
501
502
503
504
/*
 * Two POSIX locks have the same owner when the lock manager says so
 * (lm_compare_owner, and only if both locks use the same lmops), or by
 * default when they were created through the same open-file table.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
512
513
/*
 * Add a lock to the global (per-CPU) list. Records the CPU whose list
 * was used in fl_link_cpu so removal can lock just that CPU's list.
 */
static inline void
locks_insert_global_locks(struct file_lock *fl)
{
	lg_local_lock(&file_lock_lglock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
	lg_local_unlock(&file_lock_lglock);
}
522
523
/* Remove a lock from the global list, if it was ever inserted. */
static inline void
locks_delete_global_locks(struct file_lock *fl)
{
	/*
	 * Cheap unlocked test first: a lock that was never put on the
	 * global list (e.g. an on-stack request) has nothing to do, and
	 * skipping the lglock here avoids taking it for such locks.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;
	/* lock only the CPU list this entry was hashed onto */
	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
	hlist_del_init(&fl->fl_link);
	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
}
538
/*
 * Hash key for blocked_hash: a lock manager may supply its own key
 * (lm_owner_key, expected to agree with lm_compare_owner); by default
 * the fl_owner pointer value is used.
 */
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}
546
/* Hash a blocked waiter by owner. Caller holds blocked_lock_lock. */
static inline void
locks_insert_global_blocked(struct file_lock *waiter)
{
	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
552
/* Unhash a blocked waiter. Caller holds blocked_lock_lock. */
static inline void
locks_delete_global_blocked(struct file_lock *waiter)
{
	hash_del(&waiter->fl_link);
}
558
559
560
561
562
563
/*
 * Remove waiter from its blocker's block list and from the global
 * blocked hash, and forget who it was waiting for (fl_next is used to
 * signal "granted" to the sleeping side: see the !fl->fl_next waits).
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}
570
/* As __locks_delete_block(), but takes blocked_lock_lock itself. */
static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}
577
578
579
580
581
582
583
584
585
586
587
/*
 * Insert waiter into blocker's block list (FIFO, so waiters wake in the
 * order they blocked). Only POSIX waiters go into the global blocked
 * hash — it exists solely for POSIX deadlock detection.
 *
 * Must be called with both the i_lock and blocked_lock_lock held.
 */
static void __locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker))
		locks_insert_global_blocked(waiter);
}
597
598
/* As __locks_insert_block(), taking blocked_lock_lock; i_lock held. */
static void locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}
606
607
608
609
610
611
/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->i_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking the global lock if the list is empty. This is safe
	 * because new blocked requests are only added to the list under
	 * the i_lock, which is held here. Entries are only *removed*
	 * under blocked_lock_lock, so the loop below rechecks emptiness
	 * after taking it.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		/* lock managers get a callback; everyone else is woken */
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}
638
639
640
641
642
643
/*
 * Insert fl into the inode's lock list at position *pos and make it
 * visible on the global per-CPU list. Pins the owning task's pid in
 * fl_nspid so the lock can later be reported in the right namespace.
 *
 * Must be called with the i_lock held.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	fl->fl_nspid = get_pid(task_tgid(current));

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	locks_insert_global_locks(fl);
}
654
655
656
657
658
659
660
661
662
/*
 * Unlink a lock and free it. Wakes up processes that were blocked on it
 * — the removal may satisfy their requests now.
 *
 * Must be called with the i_lock held.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	locks_delete_global_locks(fl);

	/* unlink from the inode's singly-linked i_flock list */
	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;

	/* drop the pid reference taken at insert time */
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
680
681
682
683
684static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
685{
686 if (sys_fl->fl_type == F_WRLCK)
687 return 1;
688 if (caller_fl->fl_type == F_WRLCK)
689 return 1;
690 return 0;
691}
692
693
694
695
/*
 * POSIX conflict check: only a POSIX lock held by a *different* owner
 * over an overlapping range can conflict — one owner's POSIX locks
 * never conflict with each other.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!IS_POSIX(sys_fl))
		return 0;
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return locks_conflict(caller_fl, sys_fl);
}
710
711
712
713
714static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
715{
716
717
718
719 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
720 return (0);
721 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
722 return 0;
723
724 return (locks_conflict(caller_fl, sys_fl));
725}
726
/**
 * posix_test_lock - test for the existence of a POSIX-style lock
 * @filp: the file to test against
 * @fl: describes the query (owner/range/type); on return it is
 *	overwritten with the first conflicting lock found, or its
 *	fl_type is set to %F_UNLCK when there is no conflict.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct inode *inode = file_inode(filp);

	spin_lock(&inode->i_lock);
	for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(fl, cfl))
			break;
	}
	if (cfl) {
		__locks_copy_lock(fl, cfl);
		/* report the holder's pid as seen from its pid namespace */
		if (cfl->fl_nspid)
			fl->fl_pid = pid_vnr(cfl->fl_nspid);
	} else
		fl->fl_type = F_UNLCK;
	spin_unlock(&inode->i_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
/*
 * Bound on how far posix_locks_deadlock() chases the chain of blocked
 * owners, capping the work done under blocked_lock_lock (and giving up,
 * reporting "no deadlock", on unexpectedly long or cyclic chains).
 */
#define MAX_DEADLK_ITERATIONS 10
777
778
/*
 * Find the lock that block_fl's owner is itself blocked on, if any, by
 * looking the owner up in the blocked hash and following fl_next (which
 * for a blocked waiter points at its blocker).
 *
 * Caller holds blocked_lock_lock.
 */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
789
790
/*
 * Deadlock detection: walk the wait-for chain starting at the owner of
 * block_fl. Returns 1 if the chain leads back to caller_fl's own owner
 * (granting the request would deadlock). Gives up after
 * MAX_DEADLK_ITERATIONS hops, reporting no deadlock.
 *
 * Caller holds blocked_lock_lock (the hash walked here is protected by it).
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
804
805
806
807
808
809
810
811
/*
 * Apply (or remove, for F_UNLCK) an flock-style lock on a file. A new
 * flock() through the same struct file replaces any existing one; with
 * FL_ACCESS only a conflict check is performed. Returns 0, -EAGAIN,
 * -ENOENT (FL_EXISTS unlock with nothing held), -ENOMEM, or
 * FILE_LOCK_DEFERRED when the request was queued behind a blocker.
 */
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = file_inode(filp);
	int error = 0;
	int found = 0;

	/* allocate up front so we don't sleep while holding i_lock */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	spin_lock(&inode->i_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* find (and remove) any existing flock lock on this struct file */
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;		/* flock locks precede POSIX locks */
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* already hold the requested type */
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

	/*
	 * If we removed a lock above, give anyone who was blocked on it a
	 * chance to run before we try to take the new lock.
	 */
	if (found) {
		spin_unlock(&inode->i_lock);
		cond_resched();
		spin_lock(&inode->i_lock);
	}

find_conflict:
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		/* queue behind the conflicting lock */
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;	/* conflict check only; nothing to insert */
	locks_copy_lock(new_fl, request);
	locks_insert_lock(before, new_fl);
	new_fl = NULL;	/* ownership transferred to the inode list */
	error = 0;

out:
	spin_unlock(&inode->i_lock);
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
890
/*
 * Core of POSIX lock handling: apply, merge, split or remove a POSIX
 * lock on an inode. Adjacent/overlapping locks of the same owner and
 * type are coalesced; a differing type punches a hole (possibly
 * splitting an existing lock in two). With FL_ACCESS only conflicts are
 * checked. On conflict, *conflock (if non-NULL) receives a copy of the
 * conflicting lock.
 */
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error;
	bool added = false;

	/*
	 * We may need up to two spare locks (insert + split), so allocate
	 * them in advance rather than under i_lock. A whole-file unlock
	 * can never need new memory.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	spin_lock(&inode->i_lock);
	/*
	 * New lock request: scan for conflicts with other owners' POSIX
	 * locks. On conflict either fail immediately or block, after
	 * running deadlock detection under blocked_lock_lock.
	 */
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				__locks_copy_lock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * list must happen under the same blocked_lock_lock
			 * hold, or the wait-for graph could change between.
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're only checking for a conflict (FL_ACCESS), we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/*
	 * Locks are kept sorted with an owner's locks grouped together;
	 * skip forward to this owner's first lock.
	 */
	before = &inode->i_flock;

	/* first skip locks owned by other processes */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* process this owner's locks */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* same type: merge adjacent or overlapping regions */
		if (request->fl_type == fl->fl_type) {
			/*
			 * Compare using "start - 1" rather than "end + 1":
			 * if end is OFFSET_MAX, end + 1 would overflow.
			 */
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* entirely after the request: insertion point found */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/*
			 * Same type and adjacent/overlapping: grow into one
			 * lock spanning the union of both ranges.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				/* already merged into an earlier lock */
				locks_delete_lock(before);
				continue;
			}
			request = fl;	/* keep updating the in-place lock */
			added = true;
		}
		else {
			/* different type: carve the request's range out */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;	/* keeps a piece below */
			/*
			 * A piece survives above the request: remember it
			 * and stop — nothing further can overlap.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/*
				 * The request completely covers this lock
				 * (this may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/*
				 * Reuse the old lock for the new range and
				 * type; wake waiters, since the type change
				 * may satisfy them.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				locks_release_private(fl);
				locks_copy_private(fl, request);
				request = fl;
				added = true;
			}
		}

		/* go on to the next lock */
	next_lock:
		before = &fl->fl_next;
	}

	/*
	 * The code above only modified existing locks; every insertion
	 * happens below, so it is still safe to bail out here if the
	 * split we need has no spare lock to use.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			/* unlocking a range nothing covered */
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/*
			 * The request splits one old lock in two; use the
			 * second spare for the lower piece.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&inode->i_lock);
	/* free any spare locks we didn't end up using */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Adjacent and overlapping locks of the same owner/type are merged;
 * see __posix_lock_file() for the full semantics and return values.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return __posix_lock_file(file_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
/**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * As posix_lock_file(), but sleeps (interruptibly) whenever the request
 * is deferred behind a blocker, retrying once woken. fl->fl_next being
 * cleared signals that the block was released. On signal, the waiter is
 * unlinked from its blocker and -ERESTARTSYS is returned.
 */
int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_file(filp, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* interrupted: drop out of the blocker's wait list */
		locks_delete_block(fl);
		break;
	}
	return error;
}
EXPORT_SYMBOL(posix_lock_file_wait);
1152
1153
1154
1155
1156
1157
1158
1159
/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Scans the inode's lock list for any POSIX lock held by an owner other
 * than the current process. Returns -EAGAIN if one exists, 0 otherwise.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 * The loop stops (fl non-NULL) at the first foreign-owned one.
	 */
	spin_lock(&inode->i_lock);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	spin_unlock(&inode->i_lock);
	return fl ? -EAGAIN : 0;
}
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, otherwise shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was); may be NULL
 * @offset: start of the proposed access
 * @count: length of the proposed access
 *
 * Performs an FL_ACCESS (conflict-only) POSIX check over the range.
 * Blocks while a conflicting lock exists, unless the file was opened
 * O_NONBLOCK (or filp is NULL), in which case -EAGAIN comes straight
 * back from __posix_lock_file().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have changed
			 * the permissions behind our back: recheck that
			 * mandatory locking still applies before retrying.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		/* interrupted, or mandatory locking switched off */
		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1232
/*
 * Clear whichever pending-break state the new lease type satisfies: a
 * full unlock satisfies both a pending unlock and a pending downgrade;
 * a downgrade to read only clears the downgrade bit.
 */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1243
1244
/*
 * We already hold a lease at *before; change it to the given type.
 * Downgrades just retype the lock and wake waiters; F_UNLCK also tears
 * down the SIGIO delivery state and deletes the lease entirely.
 */
int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		/* drop async-notification ownership for the dying lease */
		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			/* should never happen: fasync_helper(0,...) clears it */
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock(before);
	}
	return 0;
}

EXPORT_SYMBOL(lease_modify);
1270
1271static bool past_time(unsigned long then)
1272{
1273 if (!then)
1274
1275 return false;
1276 return time_after(jiffies, then);
1277}
1278
/*
 * Expire leases whose break/downgrade deadline has passed. Leases sit
 * at the front of i_flock, so the walk stops at the first non-breaking
 * entry. lease_modify(F_UNLCK) deletes *before, so only advance the
 * cursor when the current entry survived. Callers hold i_lock.
 */
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
		if (past_time(fl->fl_downgrade_time))
			lease_modify(before, F_RDLCK);
		if (past_time(fl->fl_break_time))
			lease_modify(before, F_UNLCK);
		if (fl == *before)	/* lease still exists */
			before = &fl->fl_next;
	}
}
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305int __break_lease(struct inode *inode, unsigned int mode)
1306{
1307 int error = 0;
1308 struct file_lock *new_fl, *flock;
1309 struct file_lock *fl;
1310 unsigned long break_time;
1311 int i_have_this_lease = 0;
1312 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1313
1314 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1315 if (IS_ERR(new_fl))
1316 return PTR_ERR(new_fl);
1317
1318 spin_lock(&inode->i_lock);
1319
1320 time_out_leases(inode);
1321
1322 flock = inode->i_flock;
1323 if ((flock == NULL) || !IS_LEASE(flock))
1324 goto out;
1325
1326 if (!locks_conflict(flock, new_fl))
1327 goto out;
1328
1329 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1330 if (fl->fl_owner == current->files)
1331 i_have_this_lease = 1;
1332
1333 break_time = 0;
1334 if (lease_break_time > 0) {
1335 break_time = jiffies + lease_break_time * HZ;
1336 if (break_time == 0)
1337 break_time++;
1338 }
1339
1340 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1341 if (want_write) {
1342 if (fl->fl_flags & FL_UNLOCK_PENDING)
1343 continue;
1344 fl->fl_flags |= FL_UNLOCK_PENDING;
1345 fl->fl_break_time = break_time;
1346 } else {
1347 if (lease_breaking(flock))
1348 continue;
1349 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1350 fl->fl_downgrade_time = break_time;
1351 }
1352 fl->fl_lmops->lm_break(fl);
1353 }
1354
1355 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1356 error = -EWOULDBLOCK;
1357 goto out;
1358 }
1359
1360restart:
1361 break_time = flock->fl_break_time;
1362 if (break_time != 0) {
1363 break_time -= jiffies;
1364 if (break_time == 0)
1365 break_time++;
1366 }
1367 locks_insert_block(flock, new_fl);
1368 spin_unlock(&inode->i_lock);
1369 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1370 !new_fl->fl_next, break_time);
1371 spin_lock(&inode->i_lock);
1372 locks_delete_block(new_fl);
1373 if (error >= 0) {
1374 if (error == 0)
1375 time_out_leases(inode);
1376
1377
1378
1379
1380 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1381 flock = flock->fl_next) {
1382 if (locks_conflict(new_fl, flock))
1383 goto restart;
1384 }
1385 error = 0;
1386 }
1387
1388out:
1389 spin_unlock(&inode->i_lock);
1390 locks_free_lock(new_fl);
1391 return error;
1392}
1393
1394EXPORT_SYMBOL(__break_lease);
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
/**
 * lease_get_mtime - get the last modified time of an inode
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * If a write lease is held on the file, report the current time instead
 * of the stored mtime — the lease holder may be modifying the file.
 * NOTE(review): reads i_flock without taking i_lock; presumably a
 * tolerated racy read — verify against callers.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
		*time = current_fs_time(inode->i_sb);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * Returns the type of the caller's lease on this file — %F_RDLCK,
 * %F_WRLCK, or %F_UNLCK if none. If a break is in progress the *target*
 * type is reported (see target_leasetype()): %F_RDLCK for a pending
 * downgrade, %F_UNLCK for a pending unlock.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = file_inode(filp);
	int type = F_UNLCK;

	spin_lock(&inode->i_lock);
	time_out_leases(file_inode(filp));
	/* leases sit at the front of the list; stop at the first non-lease */
	for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = target_leasetype(fl);
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	return type;
}
1457
/*
 * Install (or retype) a lease. Caller holds i_lock. On success the
 * lease from *flp is inserted (or, when the caller already held a lease
 * on this filp, that existing lease is retyped via lm_change and *flp
 * is pointed at it). Returns 0, -EAGAIN on conflicts, or -EINVAL when
 * leases are disabled.
 */
static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
{
	struct file_lock *fl, **before, **my_before = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	lease = *flp;

	/* a read lease is not allowed while the file is open for write */
	error = -EAGAIN;
	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
		goto out;
	/* a write lease requires the file to be otherwise unreferenced */
	if ((arg == F_WRLCK)
	    && ((d_count(dentry) > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out;

	/*
	 * At this point, we know that if there is an exclusive lease on
	 * this file, then we hold it on this filp (otherwise our open of
	 * this file would have blocked). And if we are trying to acquire
	 * an exclusive lease, then the file is not open by anyone
	 * (including us) except for this filp.
	 */
	error = -EAGAIN;
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp) {
			my_before = before;
			continue;
		}
		/*
		 * No exclusive leases if someone else holds a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is unlocking theirs:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_before != NULL) {
		/* already have a lease on this filp: just retype it */
		error = lease->fl_lmops->lm_change(my_before, arg);
		if (!error)
			*flp = *my_before;
		goto out;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock(before, lease);
	return 0;

out:
	return error;
}
1522
/*
 * Remove the lease this filp holds, if any, by asking the lock manager
 * to change it to F_UNLCK. Returns -EAGAIN when no lease is held.
 * Caller holds i_lock.
 */
static int generic_delete_lease(struct file *filp, struct file_lock **flp)
{
	struct file_lock *fl, **before;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file != filp)
			continue;
		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
	}
	return -EAGAIN;
}
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
/**
 * generic_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain (%F_RDLCK, %F_WRLCK, %F_UNLCK)
 * @flp: input - file_lock to use, output - file_lock inserted
 *
 * The (input) flp->fl_lmops->lm_break function is required by
 * break_lease(). Only the file owner (or CAP_LEASE) may set a lease,
 * and only on regular files.
 *
 * Called with inode->i_lock held.
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	time_out_leases(inode);

	BUG_ON(!(*flp)->fl_lmops->lm_break);

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, flp);
	case F_RDLCK:
	case F_WRLCK:
		return generic_add_lease(filp, arg, flp);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
1579
1580static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1581{
1582 if (filp->f_op && filp->f_op->setlease)
1583 return filp->f_op->setlease(filp, arg, lease);
1584 else
1585 return generic_setlease(filp, arg, lease);
1586}
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
/**
 * vfs_setlease - sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
 * @lease: file_lock to use when adding a lease
 *
 * Takes inode->i_lock around the filesystem/generic setlease call.
 * Note that *lease may be replaced on return (e.g. pointed at an
 * existing lease that was retyped); see generic_add_lease().
 */
int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
	struct inode *inode = file_inode(filp);
	int error;

	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, lease);
	spin_unlock(&inode->i_lock);

	return error;
}
EXPORT_SYMBOL_GPL(vfs_setlease);
1627
/*
 * Drop any lease the caller holds on filp. The on-stack lock only
 * carries lm_change (via lease_manager_ops) into the delete path;
 * lease_init() cannot fail for F_UNLCK since assign_type() accepts it.
 */
static int do_fcntl_delete_lease(struct file *filp)
{
	struct file_lock fl, *flp = &fl;

	lease_init(filp, F_UNLCK, flp);

	return vfs_setlease(filp, F_UNLCK, &flp);
}
1636
/*
 * Acquire a lease on filp and arm SIGIO delivery for lease breaks:
 * allocates the lease and a fasync entry up front, installs the lease
 * under i_lock, then hooks fd into the installed lease's fasync list
 * and makes the current task the SIGIO target.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	struct inode *inode = file_inode(filp);
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	/* pre-allocate so we never sleep under i_lock */
	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	ret = fl;
	spin_lock(&inode->i_lock);
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		spin_unlock(&inode->i_lock);
		locks_free_lock(fl);
		goto out_free_fasync;
	}
	/* setlease may have reused an existing lease instead of ours */
	if (ret != fl)
		locks_free_lock(fl);

	/*
	 * fasync_insert_entry() returns the old entry if any. If there
	 * was no old entry, then it used "new" and inserted it into the
	 * fasync list. Clear new so that we don't release it here.
	 */
	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
		new = NULL;

	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
	spin_unlock(&inode->i_lock);

out_free_fasync:
	if (new)
		fasync_free(new);
	return error;
}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1693{
1694 if (arg == F_UNLCK)
1695 return do_fcntl_delete_lease(filp);
1696 return do_fcntl_add_lease(fd, filp, arg);
1697}
1698
1699
1700
1701
1702
1703
1704
1705
/**
 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * As flock_lock_file(), but sleeps (interruptibly) whenever the request
 * is deferred behind a blocker, retrying once woken (fl->fl_next being
 * cleared signals release). On signal, the waiter is unlinked from its
 * blocker and -ERESTARTSYS is returned.
 */
int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_file(filp, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* interrupted: drop out of the blocker's wait list */
		locks_delete_block(fl);
		break;
	}
	return error;
}

EXPORT_SYMBOL(flock_lock_file_wait);
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1746{
1747 struct fd f = fdget(fd);
1748 struct file_lock *lock;
1749 int can_sleep, unlock;
1750 int error;
1751
1752 error = -EBADF;
1753 if (!f.file)
1754 goto out;
1755
1756 can_sleep = !(cmd & LOCK_NB);
1757 cmd &= ~LOCK_NB;
1758 unlock = (cmd == LOCK_UN);
1759
1760 if (!unlock && !(cmd & LOCK_MAND) &&
1761 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
1762 goto out_putf;
1763
1764 error = flock_make_lock(f.file, &lock, cmd);
1765 if (error)
1766 goto out_putf;
1767 if (can_sleep)
1768 lock->fl_flags |= FL_SLEEP;
1769
1770 error = security_file_lock(f.file, lock->fl_type);
1771 if (error)
1772 goto out_free;
1773
1774 if (f.file->f_op && f.file->f_op->flock)
1775 error = f.file->f_op->flock(f.file,
1776 (can_sleep) ? F_SETLKW : F_SETLK,
1777 lock);
1778 else
1779 error = flock_lock_file_wait(f.file, lock);
1780
1781 out_free:
1782 locks_free_lock(lock);
1783
1784 out_putf:
1785 fdput(f);
1786 out:
1787 return error;
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798int vfs_test_lock(struct file *filp, struct file_lock *fl)
1799{
1800 if (filp->f_op && filp->f_op->lock)
1801 return filp->f_op->lock(filp, F_GETLK, fl);
1802 posix_test_lock(filp, fl);
1803 return 0;
1804}
1805EXPORT_SYMBOL_GPL(vfs_test_lock);
1806
1807static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1808{
1809 flock->l_pid = fl->fl_pid;
1810#if BITS_PER_LONG == 32
1811
1812
1813
1814
1815 if (fl->fl_start > OFFT_OFFSET_MAX)
1816 return -EOVERFLOW;
1817 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1818 return -EOVERFLOW;
1819#endif
1820 flock->l_start = fl->fl_start;
1821 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1822 fl->fl_end - fl->fl_start + 1;
1823 flock->l_whence = 0;
1824 flock->l_type = fl->fl_type;
1825 return 0;
1826}
1827
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of posix_lock_to_flock(): with a 64-bit l_start/l_len
 * no overflow is possible, so this cannot fail.
 */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = fl->fl_pid;
	flock->l_start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock->l_len = 0;
	else
		flock->l_len = fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
1839
1840
1841
1842
1843int fcntl_getlk(struct file *filp, struct flock __user *l)
1844{
1845 struct file_lock file_lock;
1846 struct flock flock;
1847 int error;
1848
1849 error = -EFAULT;
1850 if (copy_from_user(&flock, l, sizeof(flock)))
1851 goto out;
1852 error = -EINVAL;
1853 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1854 goto out;
1855
1856 error = flock_to_posix_lock(filp, &file_lock, &flock);
1857 if (error)
1858 goto out;
1859
1860 error = vfs_test_lock(filp, &file_lock);
1861 if (error)
1862 goto out;
1863
1864 flock.l_type = file_lock.fl_type;
1865 if (file_lock.fl_type != F_UNLCK) {
1866 error = posix_lock_to_flock(&flock, &file_lock);
1867 if (error)
1868 goto out;
1869 }
1870 error = -EFAULT;
1871 if (!copy_to_user(l, &flock, sizeof(flock)))
1872 error = 0;
1873out:
1874 return error;
1875}
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1911{
1912 if (filp->f_op && filp->f_op->lock)
1913 return filp->f_op->lock(filp, cmd, fl);
1914 else
1915 return posix_lock_file(filp, fl, conf);
1916}
1917EXPORT_SYMBOL_GPL(vfs_lock_file);
1918
1919static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1920 struct file_lock *fl)
1921{
1922 int error;
1923
1924 error = security_file_lock(filp, fl->fl_type);
1925 if (error)
1926 return error;
1927
1928 for (;;) {
1929 error = vfs_lock_file(filp, cmd, fl, NULL);
1930 if (error != FILE_LOCK_DEFERRED)
1931 break;
1932 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1933 if (!error)
1934 continue;
1935
1936 locks_delete_block(fl);
1937 break;
1938 }
1939
1940 return error;
1941}
1942
1943
1944
1945
1946int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1947 struct flock __user *l)
1948{
1949 struct file_lock *file_lock = locks_alloc_lock();
1950 struct flock flock;
1951 struct inode *inode;
1952 struct file *f;
1953 int error;
1954
1955 if (file_lock == NULL)
1956 return -ENOLCK;
1957
1958
1959
1960
1961 error = -EFAULT;
1962 if (copy_from_user(&flock, l, sizeof(flock)))
1963 goto out;
1964
1965 inode = file_inode(filp);
1966
1967
1968
1969
1970 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1971 error = -EAGAIN;
1972 goto out;
1973 }
1974
1975again:
1976 error = flock_to_posix_lock(filp, file_lock, &flock);
1977 if (error)
1978 goto out;
1979 if (cmd == F_SETLKW) {
1980 file_lock->fl_flags |= FL_SLEEP;
1981 }
1982
1983 error = -EBADF;
1984 switch (flock.l_type) {
1985 case F_RDLCK:
1986 if (!(filp->f_mode & FMODE_READ))
1987 goto out;
1988 break;
1989 case F_WRLCK:
1990 if (!(filp->f_mode & FMODE_WRITE))
1991 goto out;
1992 break;
1993 case F_UNLCK:
1994 break;
1995 default:
1996 error = -EINVAL;
1997 goto out;
1998 }
1999
2000 error = do_lock_file_wait(filp, cmd, file_lock);
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011 spin_lock(¤t->files->file_lock);
2012 f = fcheck(fd);
2013 spin_unlock(¤t->files->file_lock);
2014 if (!error && f != filp && flock.l_type != F_UNLCK) {
2015 flock.l_type = F_UNLCK;
2016 goto again;
2017 }
2018
2019out:
2020 locks_free_lock(file_lock);
2021 return error;
2022}
2023
2024#if BITS_PER_LONG == 32
2025
2026
2027
2028int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
2029{
2030 struct file_lock file_lock;
2031 struct flock64 flock;
2032 int error;
2033
2034 error = -EFAULT;
2035 if (copy_from_user(&flock, l, sizeof(flock)))
2036 goto out;
2037 error = -EINVAL;
2038 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
2039 goto out;
2040
2041 error = flock64_to_posix_lock(filp, &file_lock, &flock);
2042 if (error)
2043 goto out;
2044
2045 error = vfs_test_lock(filp, &file_lock);
2046 if (error)
2047 goto out;
2048
2049 flock.l_type = file_lock.fl_type;
2050 if (file_lock.fl_type != F_UNLCK)
2051 posix_lock_to_flock64(&flock, &file_lock);
2052
2053 error = -EFAULT;
2054 if (!copy_to_user(l, &flock, sizeof(flock)))
2055 error = 0;
2056
2057out:
2058 return error;
2059}
2060
2061
2062
2063
2064int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2065 struct flock64 __user *l)
2066{
2067 struct file_lock *file_lock = locks_alloc_lock();
2068 struct flock64 flock;
2069 struct inode *inode;
2070 struct file *f;
2071 int error;
2072
2073 if (file_lock == NULL)
2074 return -ENOLCK;
2075
2076
2077
2078
2079 error = -EFAULT;
2080 if (copy_from_user(&flock, l, sizeof(flock)))
2081 goto out;
2082
2083 inode = file_inode(filp);
2084
2085
2086
2087
2088 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2089 error = -EAGAIN;
2090 goto out;
2091 }
2092
2093again:
2094 error = flock64_to_posix_lock(filp, file_lock, &flock);
2095 if (error)
2096 goto out;
2097 if (cmd == F_SETLKW64) {
2098 file_lock->fl_flags |= FL_SLEEP;
2099 }
2100
2101 error = -EBADF;
2102 switch (flock.l_type) {
2103 case F_RDLCK:
2104 if (!(filp->f_mode & FMODE_READ))
2105 goto out;
2106 break;
2107 case F_WRLCK:
2108 if (!(filp->f_mode & FMODE_WRITE))
2109 goto out;
2110 break;
2111 case F_UNLCK:
2112 break;
2113 default:
2114 error = -EINVAL;
2115 goto out;
2116 }
2117
2118 error = do_lock_file_wait(filp, cmd, file_lock);
2119
2120
2121
2122
2123
2124 spin_lock(¤t->files->file_lock);
2125 f = fcheck(fd);
2126 spin_unlock(¤t->files->file_lock);
2127 if (!error && f != filp && flock.l_type != F_UNLCK) {
2128 flock.l_type = F_UNLCK;
2129 goto again;
2130 }
2131
2132out:
2133 locks_free_lock(file_lock);
2134 return error;
2135}
2136#endif
2137
2138
2139
2140
2141
2142
2143void locks_remove_posix(struct file *filp, fl_owner_t owner)
2144{
2145 struct file_lock lock;
2146
2147
2148
2149
2150
2151
2152 if (!file_inode(filp)->i_flock)
2153 return;
2154
2155 lock.fl_type = F_UNLCK;
2156 lock.fl_flags = FL_POSIX | FL_CLOSE;
2157 lock.fl_start = 0;
2158 lock.fl_end = OFFSET_MAX;
2159 lock.fl_owner = owner;
2160 lock.fl_pid = current->tgid;
2161 lock.fl_file = filp;
2162 lock.fl_ops = NULL;
2163 lock.fl_lmops = NULL;
2164
2165 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2166
2167 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2168 lock.fl_ops->fl_release_private(&lock);
2169}
2170
2171EXPORT_SYMBOL(locks_remove_posix);
2172
2173
2174
2175
/*
 * Remove any FLOCK-style locks and leases that this struct file still
 * holds on its inode (called when @filp is being released).
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = file_inode(filp);
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	/* Let a filesystem-provided ->flock() release its own state first. */
	if (filp->f_op && filp->f_op->flock) {
		struct file_lock fl = {
			.fl_pid = current->tgid,
			.fl_file = filp,
			.fl_flags = FL_FLOCK,
			.fl_type = F_UNLCK,
			.fl_end = OFFSET_MAX,
		};
		filp->f_op->flock(filp, F_SETLKW, &fl);
		if (fl.fl_ops && fl.fl_ops->fl_release_private)
			fl.fl_ops->fl_release_private(&fl);
	}

	spin_lock(&inode->i_lock);
	before = &inode->i_flock;

	/*
	 * Walk via a pointer-to-pointer so entries can be unlinked in
	 * place: after a deletion, *before already names the next entry,
	 * hence the "continue" without advancing.
	 */
	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
			/* POSIX locks belonging to this file should already
			 * have been removed by locks_remove_posix(). */
			BUG();
		}
		before = &fl->fl_next;
	}
	spin_unlock(&inode->i_lock);
}
2218
2219
2220
2221
2222
2223
2224
2225int
2226posix_unblock_lock(struct file_lock *waiter)
2227{
2228 int status = 0;
2229
2230 spin_lock(&blocked_lock_lock);
2231 if (waiter->fl_next)
2232 __locks_delete_block(waiter);
2233 else
2234 status = -ENOENT;
2235 spin_unlock(&blocked_lock_lock);
2236 return status;
2237}
2238EXPORT_SYMBOL(posix_unblock_lock);
2239
2240
2241
2242
2243
2244
2245
2246
2247int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2248{
2249 if (filp->f_op && filp->f_op->lock)
2250 return filp->f_op->lock(filp, F_CANCELLK, fl);
2251 return 0;
2252}
2253
2254EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2255
2256#ifdef CONFIG_PROC_FS
2257#include <linux/proc_fs.h>
2258#include <linux/seq_file.h>
2259
/* Private cursor for the /proc/locks seq_file walk. */
struct locks_iterator {
	int li_cpu;	/* CPU whose per-cpu file_lock_list is being walked */
	loff_t li_pos;	/* 1-based position, printed as the lock id */
};
2264
/*
 * Emit one /proc/locks line describing @fl. @id is the position column;
 * @pfx is "" for a held lock and " ->" for a blocked waiter under it.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	/* Prefer the pid as seen in the owner's pid namespace, if recorded. */
	if (fl->fl_nspid)
		fl_pid = pid_vnr(fl->fl_nspid);
	else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = file_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		seq_printf(f, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_printf(f, "FLOCK MSNFS ");
		} else {
			seq_printf(f, "FLOCK ADVISORY ");
		}
	} else if (IS_LEASE(fl)) {
		seq_printf(f, "LEASE ");
		/* A lease being broken; an active lease; or the breaker
		 * entry itself (no fl_file). */
		if (lease_breaking(fl))
			seq_printf(f, "BREAKING ");
		else if (fl->fl_file)
			seq_printf(f, "ACTIVE ");
		else
			seq_printf(f, "BREAKER ");
	} else {
		seq_printf(f, "UNKNOWN UNKNOWN ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#ifdef WE_CAN_BREAK_LSLK_NOW
		seq_printf(f, "%d %s:%ld ", fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* NOTE: the guard macro's name suggests userspace tools
		 * depend on this MAJOR:MINOR:ino form — keep it stable. */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_printf(f, "0 EOF\n");
	}
}
2335
2336static int locks_show(struct seq_file *f, void *v)
2337{
2338 struct locks_iterator *iter = f->private;
2339 struct file_lock *fl, *bfl;
2340
2341 fl = hlist_entry(v, struct file_lock, fl_link);
2342
2343 lock_get_status(f, fl, iter->li_pos, "");
2344
2345 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2346 lock_get_status(f, bfl, iter->li_pos, " ->");
2347
2348 return 0;
2349}
2350
2351static void *locks_start(struct seq_file *f, loff_t *pos)
2352{
2353 struct locks_iterator *iter = f->private;
2354
2355 iter->li_pos = *pos + 1;
2356 lg_global_lock(&file_lock_lglock);
2357 spin_lock(&blocked_lock_lock);
2358 return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos);
2359}
2360
2361static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2362{
2363 struct locks_iterator *iter = f->private;
2364
2365 ++iter->li_pos;
2366 return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos);
2367}
2368
/* seq_file ->stop: drop the locks taken in locks_start(), reverse order. */
static void locks_stop(struct seq_file *f, void *v)
{
	spin_unlock(&blocked_lock_lock);
	lg_global_unlock(&file_lock_lglock);
}
2374
/* Iterator callbacks backing /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start = locks_start,
	.next = locks_next,
	.stop = locks_stop,
	.show = locks_show,
};
2381
/* open() for /proc/locks: seq_file with a private locks_iterator cursor. */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}
2387
/* File operations for /proc/locks (read-only seq_file). */
static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2394
/* Register /proc/locks at init time. */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
module_init(proc_locks_init);
2401#endif
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2417{
2418 struct file_lock *fl;
2419 int result = 1;
2420
2421 spin_lock(&inode->i_lock);
2422 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2423 if (IS_POSIX(fl)) {
2424 if (fl->fl_type == F_RDLCK)
2425 continue;
2426 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2427 continue;
2428 } else if (IS_FLOCK(fl)) {
2429 if (!(fl->fl_type & LOCK_MAND))
2430 continue;
2431 if (fl->fl_type & LOCK_READ)
2432 continue;
2433 } else
2434 continue;
2435 result = 0;
2436 break;
2437 }
2438 spin_unlock(&inode->i_lock);
2439 return result;
2440}
2441
2442EXPORT_SYMBOL(lock_may_read);
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2458{
2459 struct file_lock *fl;
2460 int result = 1;
2461
2462 spin_lock(&inode->i_lock);
2463 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2464 if (IS_POSIX(fl)) {
2465 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2466 continue;
2467 } else if (IS_FLOCK(fl)) {
2468 if (!(fl->fl_type & LOCK_MAND))
2469 continue;
2470 if (fl->fl_type & LOCK_WRITE)
2471 continue;
2472 } else
2473 continue;
2474 result = 0;
2475 break;
2476 }
2477 spin_unlock(&inode->i_lock);
2478 return result;
2479}
2480
2481EXPORT_SYMBOL(lock_may_write);
2482
2483static int __init filelock_init(void)
2484{
2485 int i;
2486
2487 filelock_cache = kmem_cache_create("file_lock_cache",
2488 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2489
2490 lg_lock_init(&file_lock_lglock, "file_lock_lglock");
2491
2492 for_each_possible_cpu(i)
2493 INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i));
2494
2495 return 0;
2496}
2497
2498core_initcall(filelock_init);
2499