1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/security.h>
123#include <linux/slab.h>
124#include <linux/syscalls.h>
125#include <linux/time.h>
126#include <linux/rcupdate.h>
127#include <linux/pid_namespace.h>
128#include <linux/hashtable.h>
129#include <linux/percpu.h>
130
131#define CREATE_TRACE_POINTS
132#include <trace/events/filelock.h>
133
134#include <linux/uaccess.h>
135
/* Lock-category predicates, keyed off fl_flags. */
#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
/* Remotely-held locks (e.g. lock-manager owned) carry a non-positive pid. */
#define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
141
142static inline bool is_remote_lock(struct file *filp)
143{
144 return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
145}
146
147static bool lease_breaking(struct file_lock *fl)
148{
149 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
150}
151
/*
 * Report the lease type this lease is heading towards: F_UNLCK if an
 * unlock is pending, F_RDLCK if a downgrade is pending, otherwise the
 * current type.  An unlock-pending state wins over downgrade-pending.
 */
static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}
160
/*
 * Policy knobs: whether leases may be taken at all, and the grace period
 * (in seconds; converted with HZ in __break_lease) a lease holder gets to
 * release after a conflicting open before the lease is broken by force.
 * NOTE(review): presumably exposed as sysctls elsewhere — not visible here.
 */
int leases_enable = 1;
int lease_break_time = 45;
163
164
165
166
167
168
169
170
171
/*
 * Global set of all active file locks, sharded per-CPU to reduce
 * contention.  A lock is added to the list of the CPU it was created on
 * (fl_link_cpu records which, see locks_insert_global_locks()).  The
 * per-CPU spinlock protects each shard; file_rwsem brackets all
 * inserts/deletes against readers that walk the whole set.
 */
struct file_lock_list_struct {
	spinlock_t lock;
	struct hlist_head hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
178
179
180
181
182
183
184
185
186
187
188
189
/*
 * Hash of blocked POSIX lock waiters, keyed by lock owner
 * (posix_owner_key()); walked by the deadlock-detection code.
 */
#define BLOCKED_HASH_BITS 7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/* Protects blocked_hash and every file_lock's fl_block list / fl_next. */
static DEFINE_SPINLOCK(blocked_lock_lock);

/* Slab caches for file_lock_context and file_lock objects. */
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
214
/*
 * Return the inode's lock context, allocating one on demand.  For a pure
 * unlock request (type == F_UNLCK) no context is allocated — NULL then
 * simply means "nothing to unlock".  May return NULL on allocation
 * failure.
 */
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's still NULL.  If we raced with another
	 * task that installed a context first, free ours and use theirs.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
246
247static void
248locks_dump_ctx_list(struct list_head *list, char *list_type)
249{
250 struct file_lock *fl;
251
252 list_for_each_entry(fl, list, fl_list) {
253 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
254 }
255}
256
/*
 * Sanity check run when a lock context is about to be freed: all three
 * lists should be empty by then.  If not, something leaked locks —
 * complain loudly and dump the stragglers.
 */
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}
273
/*
 * Warn about any lock on @list still owned by @filp — used to detect
 * locks leaked past the final fput of a file.
 */
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
				char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}
289
/*
 * Free the inode's lock context (called when the inode is destroyed).
 * All lock lists must already be empty; leaks are reported first.
 */
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
300
301static void locks_init_lock_heads(struct file_lock *fl)
302{
303 INIT_HLIST_NODE(&fl->fl_link);
304 INIT_LIST_HEAD(&fl->fl_list);
305 INIT_LIST_HEAD(&fl->fl_block);
306 init_waitqueue_head(&fl->fl_wait);
307}
308
309
310struct file_lock *locks_alloc_lock(void)
311{
312 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
313
314 if (fl)
315 locks_init_lock_heads(fl);
316
317 return fl;
318}
319EXPORT_SYMBOL_GPL(locks_alloc_lock);
320
/*
 * Release filesystem/lock-manager private state attached to a lock:
 * call fl_release_private if provided, drop the owner reference via
 * lm_put_owner, and clear the ops pointers so this is idempotent.
 */
void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);
338
339
/*
 * Free a lock that is no longer on any list and has no waiters.
 * The BUG_ONs enforce that precondition: freeing a lock that is still
 * linked or waited on would corrupt the lists.
 */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
351
352static void
353locks_dispose_list(struct list_head *dispose)
354{
355 struct file_lock *fl;
356
357 while (!list_empty(dispose)) {
358 fl = list_first_entry(dispose, struct file_lock, fl_list);
359 list_del_init(&fl->fl_list);
360 locks_free_lock(fl);
361 }
362}
363
364void locks_init_lock(struct file_lock *fl)
365{
366 memset(fl, 0, sizeof(struct file_lock));
367 locks_init_lock_heads(fl);
368}
369
370EXPORT_SYMBOL(locks_init_lock);
371
372
373
374
/*
 * Initialize a new lock from an existing one, for reporting a conflict:
 * copies the range/type/owner but deliberately leaves fl_file and fl_ops
 * NULL.  Takes an owner reference via lm_get_owner when available.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);
393
/*
 * Full copy of a lock, including fl_file and fl_ops, giving the
 * filesystem a chance to duplicate per-lock state via fl_copy_lock.
 * The destination must not already carry fl_ops (we'd leak its state).
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);
411
412static inline int flock_translate_cmd(int cmd) {
413 if (cmd & LOCK_MAND)
414 return cmd & (LOCK_MAND | LOCK_RW);
415 switch (cmd) {
416 case LOCK_SH:
417 return F_RDLCK;
418 case LOCK_EX:
419 return F_WRLCK;
420 case LOCK_UN:
421 return F_UNLCK;
422 }
423 return -EINVAL;
424}
425
426
/*
 * Build a whole-file FL_FLOCK lock for @filp from a flock(2) command.
 * flock locks are owned by the open file description, so fl_owner is
 * the struct file itself.  Returns an ERR_PTR on bad command or OOM.
 */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;	/* fl_start is 0 from zalloc: whole file */

	return fl;
}
449
450static int assign_type(struct file_lock *fl, long type)
451{
452 switch (type) {
453 case F_RDLCK:
454 case F_WRLCK:
455 case F_UNLCK:
456 fl->fl_type = type;
457 break;
458 default:
459 return -EINVAL;
460 }
461 return 0;
462}
463
/*
 * Convert a userspace struct flock64 into an internal FL_POSIX lock.
 * Resolves l_whence against the file position / size, checks for
 * arithmetic overflow of the inclusive [fl_start, fl_end] range, and
 * fills in the ownership fields.  Returns 0 or a -errno.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it: the lock covers [start+len, start-1]. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;	/* l_len == 0: to end of file */

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
510
511
512
513
/*
 * 32-bit struct flock variant: widen the fields into a flock64 and
 * reuse flock64_to_posix_lock() for the real work.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
526
527
/*
 * lm_break callback for ordinary leases: notify the holder with SIGIO.
 * Returning false tells the caller not to delete the lease — the holder
 * is expected to release it itself within the grace period.
 */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}
534
/*
 * lm_setup callback: wire up the fasync entry so the lease holder gets
 * SIGIO on break, and direct that signal at the current task.
 */
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the fasync entry already present
	 * for this fd, if any.  When it returns NULL it consumed "fa", so
	 * clear *priv to tell the caller not to free it.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}
551
/* Lock-manager operations used for ordinary (fcntl F_SETLEASE) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
557
558
559
560
/*
 * Initialize @fl as a whole-file FL_LEASE lock on @filp of the given
 * type, owned by the file and managed by lease_manager_ops.
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
577
578
579static struct file_lock *lease_alloc(struct file *filp, long type)
580{
581 struct file_lock *fl = locks_alloc_lock();
582 int error = -ENOMEM;
583
584 if (fl == NULL)
585 return ERR_PTR(error);
586
587 error = lease_init(filp, type, fl);
588 if (error) {
589 locks_free_lock(fl);
590 return ERR_PTR(error);
591 }
592 return fl;
593}
594
595
596
/* Do the two inclusive byte ranges [fl_start, fl_end] intersect? */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
602
603
604
605
/*
 * Check whether two POSIX locks have the same owner.  A lock manager
 * may supply its own comparison (lm_compare_owner); otherwise compare
 * the raw fl_owner pointers.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
		return fl2->fl_lmops == fl1->fl_lmops &&
			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
	return fl1->fl_owner == fl2->fl_owner;
}
613
614
/*
 * Add @fl to this CPU's shard of the global lock list, remembering the
 * CPU in fl_link_cpu so deletion finds the right shard.  Must run with
 * file_rwsem held (read side).
 */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}
626
627
/* Remove @fl from the global lock list (must hold file_rwsem). */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking the per-CPU lock if the lock was never hashed.
	 * This check is racy only in appearance: insertions and deletions
	 * are serialized by the caller's flc_lock.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}
647
/*
 * Hash key for the blocked_hash: the lock manager's lm_owner_key if
 * provided, else the fl_owner pointer value.
 */
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
		return fl->fl_lmops->lm_owner_key(fl);
	return (unsigned long)fl->fl_owner;
}
655
/* Add a waiter to the blocked_hash; caller holds blocked_lock_lock. */
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
662
/* Remove a waiter from the blocked_hash; caller holds blocked_lock_lock. */
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
669
670
671
672
673
674
/*
 * Unlink @waiter from its blocker's fl_block list and the global
 * blocked_hash, and clear fl_next.  Caller holds blocked_lock_lock.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}
681
/* Locked wrapper around __locks_delete_block(). */
static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}
688
689
690
691
692
693
694
695
696
697
698
/*
 * Queue @waiter behind @blocker.  Only conventional POSIX locks (not
 * OFD locks) go into the global blocked_hash, since only they take
 * part in deadlock detection.  Caller holds blocked_lock_lock and the
 * relevant flc_lock; the waiter must not already be blocked.
 */
static void __locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}
708
709
/* Locked wrapper around __locks_insert_block(). */
static void locks_insert_block(struct file_lock *blocker,
					struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}
717
718
719
720
721
722
/*
 * Wake up processes blocked waiting for this lock.  Must be called with
 * the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty.  This is safe since
	 * new blockers may only be added after acquiring both flc_lock
	 * (which we hold) and blocked_lock_lock, so an empty list here
	 * cannot race into being non-empty underneath us.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		/* lock managers may want a callback instead of a wakeup */
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}
749
/* Link @fl before @before in its context list and publish it globally. */
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}
756
/* Unlink @fl from its context and global list, waking its waiters. */
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}
764
/*
 * Remove @fl from its context.  With a dispose list, freeing is
 * deferred until the caller drops its spinlocks; otherwise free now.
 */
static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
774
775
776
777
778static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
779{
780 if (sys_fl->fl_type == F_WRLCK)
781 return 1;
782 if (caller_fl->fl_type == F_WRLCK)
783 return 1;
784 return 0;
785}
786
787
788
789
/*
 * POSIX conflict check: locks held by the same owner never conflict
 * (a process cannot block itself), nor do non-overlapping ranges.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (posix_same_owner(caller_fl, sys_fl))
		return (0);

	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}
804
805
806
807
/*
 * flock conflict check: locks taken through the same struct file never
 * conflict, and mandatory-style (LOCK_MAND) locks never conflict here —
 * their semantics are enforced elsewhere.
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (caller_fl->fl_file == sys_fl->fl_file)
		return (0);
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}
820
/*
 * Test for a conflicting POSIX lock on @filp covering the range in @fl.
 * On conflict, @fl is overwritten with a copy of the first conflicting
 * lock (via locks_copy_conflock); otherwise fl_type is set to F_UNLCK.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	/* lockless fast path: no context or no POSIX locks at all */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
/* Bound on the wait-chain walk, to avoid looping forever on cycles. */
#define MAX_DEADLK_ITERATIONS 10

/* Find the lock that @block_fl's owner is itself blocked on, if any. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
894
895
/*
 * Deadlock detection: would blocking @caller_fl on @block_fl create a
 * cycle of owners each waiting on the next?  Walks the wait chain via
 * blocked_hash, giving up (and reporting "no deadlock") after
 * MAX_DEADLK_ITERATIONS hops.  Caller holds blocked_lock_lock.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * OFD locks are owned by the open file description, not the
	 * process; they never enter the blocked_hash, so meaningful
	 * deadlock detection is not possible for them.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
918
919
920
921
922
923
924
925
/*
 * Try to create a FLOCK lock on @inode.  Any existing FLOCK lock held
 * through the same struct file is replaced (after making sure it doesn't
 * conflict with other processes' locks).  With FL_ACCESS set, only a
 * conflict test is performed; with FL_SLEEP, a conflicting request
 * returns FILE_LOCK_DEFERRED after queueing the waiter.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		/* no context: nothing was locked, so unlock trivially "works" */
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	/* preallocate before taking spinlocks; only needed for real locks */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* drop any existing lock held through this same struct file */
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* same type already held: nothing to do */
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}
995
/*
 * Core POSIX record-lock engine: apply @request to @inode, merging with
 * or splitting the owner's existing locks as needed.  On conflict with
 * another owner's lock, @conflock (if non-NULL) receives a copy of the
 * conflicting lock and -EAGAIN (or FILE_LOCK_DEFERRED with FL_SLEEP) is
 * returned.  -EDEADLK is returned if blocking would create a deadlock.
 */
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need up to two new file_lock structures for this
	 * operation, so get them in advance — we must not sleep once the
	 * spinlocks are held.  A pure whole-file unlock, or a mere access
	 * check, can never need a new lock.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	/*
	 * New lock request: scan for conflicts with other owners' locks.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * list must happen under the same blocked_lock_lock
			 * critical section, or the wait graph could change
			 * between the check and the insert.
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock. */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type). */
		if (request->fl_type == fl->fl_type) {
			/*
			 * Skip old locks that lie entirely below the new one
			 * (and are not even adjacent to it).
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/*
			 * If the old lock lies entirely above the new one,
			 * the new lock goes in front of it — stop here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/*
			 * Same type and adjacent/overlapping: coalesce into a
			 * single lock spanning the union of both ranges.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				/* already installed a lock: old one is redundant */
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Different lock types: overlap handling is trickier. */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;	/* old lock sticks out on the left */
			/*
			 * If the old lock sticks out on the right, remember it
			 * as the piece to trim and stop scanning.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/*
				 * The new lock completely replaces an old one
				 * (this may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Install the preallocated copy and delete the
				 * old lock, rather than linking the caller's
				 * "request" (which may live on the stack) into
				 * the list.  -ENOLCK if preallocation failed.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The code above only modifies existing locks in case of merging
	 * or replacing; any insertion of new locks happens below, so it is
	 * still safe to bail out here if the split would need the second
	 * preallocated lock and we don't have it.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			/* unlock found nothing to remove */
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/*
			 * The new lock breaks the old one in two pieces, so we
			 * have to use the second preallocated lock for the
			 * left-hand piece.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	/* Free any unused preallocated locks. */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Thin wrapper that resolves @filp to its inode and calls
 * posix_lock_inode().
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
			struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1232
1233
1234
1235
1236
1237
1238
1239
/*
 * Apply a POSIX lock, sleeping until it can be granted.  Each time the
 * request is deferred, wait (interruptibly) until fl_next is cleared —
 * i.e. the blocker went away — then retry.  On signal, dequeue the
 * waiter and return -ERESTARTSYS from the wait.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
1257
1258#ifdef CONFIG_MANDATORY_FILE_LOCKING
1259
1260
1261
1262
1263
1264
1265
/*
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's POSIX lock list for locks owned by anyone other
 * than the current process (by files_struct or by OFD owner).  Returns
 * -EAGAIN if such a lock exists, 0 otherwise.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	/* lockless fast path: no POSIX locks at all */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
/*
 * locks_mandatory_area - Check for a conflicting lock
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @start: first byte in the file to check
 * @end: last byte in the file to check
 * @type: %F_WRLCK for a write check, %F_RDLCK for a read check
 *
 * Builds an FL_ACCESS probe lock over [start, end].  With a file, first
 * try a non-sleeping check under the OFD owner; otherwise (or on
 * conflict) check under the process owner, sleeping if the open allows
 * blocking.  Retries after a wakeup as long as the inode still has
 * mandatory locking enabled.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			/* non-blocking probe as the OFD owner first */
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have changed
			 * the permissions behind our back; retry only while
			 * the file still has mandatory locking enabled.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1353#endif
1354
/*
 * Clear the pending-break flags made obsolete by moving to @arg.
 * Going to F_UNLCK clears both flags; going to F_RDLCK clears only the
 * downgrade flag.
 */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1365
1366
/*
 * lease_modify - downgrade or unlock a lease, waking any waiters.
 * On F_UNLCK, tear down the SIGIO ownership and fasync entry and move
 * the lease onto @dispose for deferred freeing.
 */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			/* fasync_helper() should have removed the entry */
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
1390
1391static bool past_time(unsigned long then)
1392{
1393 if (!then)
1394
1395 return false;
1396 return time_after(jiffies, then);
1397}
1398
/*
 * Expire overdue lease breaks on @inode: downgrade leases whose
 * downgrade deadline passed, unlock those whose break deadline passed.
 * Freed leases go onto @dispose.  Caller holds flc_lock.
 */
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
1414
/*
 * Does @breaker's request conflict with the existing @lease?  Layout
 * leases only conflict with layout breakers; a delegation breaker does
 * not conflict with an ordinary lease.  Otherwise fall back to the
 * read/write conflict rule.
 */
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
		return false;
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
		return false;
	return locks_conflict(breaker, lease);
}
1423
/* Does any lease on @inode conflict with @breaker?  Caller holds flc_lock. */
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1453{
1454 int error = 0;
1455 struct file_lock_context *ctx;
1456 struct file_lock *new_fl, *fl, *tmp;
1457 unsigned long break_time;
1458 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1459 LIST_HEAD(dispose);
1460
1461 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1462 if (IS_ERR(new_fl))
1463 return PTR_ERR(new_fl);
1464 new_fl->fl_flags = type;
1465
1466
1467 ctx = smp_load_acquire(&inode->i_flctx);
1468 if (!ctx) {
1469 WARN_ON_ONCE(1);
1470 return error;
1471 }
1472
1473 percpu_down_read_preempt_disable(&file_rwsem);
1474 spin_lock(&ctx->flc_lock);
1475
1476 time_out_leases(inode, &dispose);
1477
1478 if (!any_leases_conflict(inode, new_fl))
1479 goto out;
1480
1481 break_time = 0;
1482 if (lease_break_time > 0) {
1483 break_time = jiffies + lease_break_time * HZ;
1484 if (break_time == 0)
1485 break_time++;
1486 }
1487
1488 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1489 if (!leases_conflict(fl, new_fl))
1490 continue;
1491 if (want_write) {
1492 if (fl->fl_flags & FL_UNLOCK_PENDING)
1493 continue;
1494 fl->fl_flags |= FL_UNLOCK_PENDING;
1495 fl->fl_break_time = break_time;
1496 } else {
1497 if (lease_breaking(fl))
1498 continue;
1499 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1500 fl->fl_downgrade_time = break_time;
1501 }
1502 if (fl->fl_lmops->lm_break(fl))
1503 locks_delete_lock_ctx(fl, &dispose);
1504 }
1505
1506 if (list_empty(&ctx->flc_lease))
1507 goto out;
1508
1509 if (mode & O_NONBLOCK) {
1510 trace_break_lease_noblock(inode, new_fl);
1511 error = -EWOULDBLOCK;
1512 goto out;
1513 }
1514
1515restart:
1516 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1517 break_time = fl->fl_break_time;
1518 if (break_time != 0)
1519 break_time -= jiffies;
1520 if (break_time == 0)
1521 break_time++;
1522 locks_insert_block(fl, new_fl);
1523 trace_break_lease_block(inode, new_fl);
1524 spin_unlock(&ctx->flc_lock);
1525 percpu_up_read_preempt_enable(&file_rwsem);
1526
1527 locks_dispose_list(&dispose);
1528 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1529 !new_fl->fl_next, break_time);
1530
1531 percpu_down_read_preempt_disable(&file_rwsem);
1532 spin_lock(&ctx->flc_lock);
1533 trace_break_lease_unblock(inode, new_fl);
1534 locks_delete_block(new_fl);
1535 if (error >= 0) {
1536
1537
1538
1539
1540 if (error == 0)
1541 time_out_leases(inode, &dispose);
1542 if (any_leases_conflict(inode, new_fl))
1543 goto restart;
1544 error = 0;
1545 }
1546out:
1547 spin_unlock(&ctx->flc_lock);
1548 percpu_up_read_preempt_enable(&file_rwsem);
1549 locks_dispose_list(&dispose);
1550 locks_free_lock(new_fl);
1551 return error;
1552}
1553
1554EXPORT_SYMBOL(__break_lease);
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time: pointer to a timespec which will contain the last modified time
 *
 * If the first (and only possible exclusive) lease on the inode is a
 * write lease, report "now" as the mtime; otherwise leave @time alone.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
}

EXPORT_SYMBOL(lease_get_mtime);
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read_preempt_disable(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		/* Expire any leases whose break timeout has passed. */
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			/* Report the type the lease is converging to. */
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read_preempt_enable(&file_rwsem);

		/* Free timed-out leases outside the spinlock. */
		locks_dispose_list(&dispose);
	}
	return type;
}
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
/*
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    desired lease.
 * @dentry:	dentry to check
 * @arg:	type of lease that we're trying to acquire
 * @flags:	current lock flags
 *
 * Check to see if there's an existing open fd on this file that would
 * conflict with the lease we're trying to set. Layouts (FL_LAYOUT) are
 * exempt from these checks.
 *
 * NOTE(review): the d_count/i_count heuristics below are inherently racy
 * against concurrent opens; callers re-check after lease insertion.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	if (flags & FL_LAYOUT)
		return 0;

	/* No read lease while somebody has the file open for write. */
	if ((arg == F_RDLCK) &&
	    (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
		return -EAGAIN;

	/* No write lease while anybody else has the file open. */
	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}
1667
/*
 * Attempt to set up a new lease (or upgrade/downgrade an existing one held
 * by the same owner on the same filp). Called with no locks held; takes
 * file_rwsem (read) and ctx->flc_lock. On success with a newly-inserted
 * lease, *flp is set to NULL to signal that ownership was consumed.
 */
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * give a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is unlocking:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		/* Upgrade/downgrade the lease we already hold. */
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
	smp_mb();
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	/* A new lease was inserted: the caller must not free it. */
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
1784
1785static int generic_delete_lease(struct file *filp, void *owner)
1786{
1787 int error = -EAGAIN;
1788 struct file_lock *fl, *victim = NULL;
1789 struct inode *inode = locks_inode(filp);
1790 struct file_lock_context *ctx;
1791 LIST_HEAD(dispose);
1792
1793 ctx = smp_load_acquire(&inode->i_flctx);
1794 if (!ctx) {
1795 trace_generic_delete_lease(inode, NULL);
1796 return error;
1797 }
1798
1799 percpu_down_read_preempt_disable(&file_rwsem);
1800 spin_lock(&ctx->flc_lock);
1801 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1802 if (fl->fl_file == filp &&
1803 fl->fl_owner == owner) {
1804 victim = fl;
1805 break;
1806 }
1807 }
1808 trace_generic_delete_lease(inode, victim);
1809 if (victim)
1810 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1811 spin_unlock(&ctx->flc_lock);
1812 percpu_up_read_preempt_enable(&file_rwsem);
1813 locks_dispose_list(&dispose);
1814 return error;
1815}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
/**
 * generic_setlease	-	sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @flp:	input - file_lock to use, output - file_lock inserted
 * @priv:	private data for lm_setup (may be NULL if lm_setup
 *		doesn't require it)
 *
 * The (input) flp->fl_lmops->lm_break function is required
 * by break_lease().
 */
int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
			void **priv)
{
	struct inode *inode = locks_inode(filp);
	int error;

	/* Only the file's owner or a CAP_LEASE holder may set a lease. */
	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
		return -EACCES;
	/* Leases apply only to regular files. */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	switch (arg) {
	case F_UNLCK:
		return generic_delete_lease(filp, *priv);
	case F_RDLCK:
	case F_WRLCK:
		/* lm_break is mandatory: break_lease() calls it. */
		if (!(*flp)->fl_lmops->lm_break) {
			WARN_ON_ONCE(1);
			return -ENOLCK;
		}

		return generic_add_lease(filp, arg, flp, priv);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(generic_setlease);
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876int
1877vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1878{
1879 if (filp->f_op->setlease && is_remote_lock(filp))
1880 return filp->f_op->setlease(filp, arg, lease, priv);
1881 else
1882 return generic_setlease(filp, arg, lease, priv);
1883}
1884EXPORT_SYMBOL_GPL(vfs_setlease);
1885
1886static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1887{
1888 struct file_lock *fl;
1889 struct fasync_struct *new;
1890 int error;
1891
1892 fl = lease_alloc(filp, arg);
1893 if (IS_ERR(fl))
1894 return PTR_ERR(fl);
1895
1896 new = fasync_alloc();
1897 if (!new) {
1898 locks_free_lock(fl);
1899 return -ENOMEM;
1900 }
1901 new->fa_fd = fd;
1902
1903 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1904 if (fl)
1905 locks_free_lock(fl);
1906 if (new)
1907 fasync_free(new);
1908 return error;
1909}
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1922{
1923 if (arg == F_UNLCK)
1924 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1925 return do_fcntl_add_lease(fd, filp, arg);
1926}
1927
1928
1929
1930
1931
1932
1933
1934
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode, sleeping until it is
 * granted or the wait is interrupted by a signal.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the blocked lock is woken. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Interrupted: unhook ourselves from the blocker list. */
		locks_delete_block(fl);
		break;
	}
	return error;
}
1952
1953
1954
1955
1956
1957
1958
1959
1960int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1961{
1962 int res = 0;
1963 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1964 case FL_POSIX:
1965 res = posix_lock_inode_wait(inode, fl);
1966 break;
1967 case FL_FLOCK:
1968 res = flock_lock_inode_wait(inode, fl);
1969 break;
1970 default:
1971 BUG();
1972 }
1973 return res;
1974}
1975EXPORT_SYMBOL(locks_lock_inode_wait);
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of %LOCK_SH (shared), %LOCK_EX (exclusive),
 *	%LOCK_UN (unlock), or %LOCK_MAND combined with %LOCK_READ /
 *	%LOCK_WRITE. %LOCK_NB may be OR'd in to make the call non-blocking.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/*
	 * Ordinary (non-mandatory) locks require the fd to be open for
	 * reading or writing; error is still -EBADF here.
	 */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	/* Filesystems may provide their own flock method (e.g. NFS). */
	if (f.file->f_op->flock && is_remote_lock(f.file))
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050int vfs_test_lock(struct file *filp, struct file_lock *fl)
2051{
2052 if (filp->f_op->lock && is_remote_lock(filp))
2053 return filp->f_op->lock(filp, F_GETLK, fl);
2054 posix_test_lock(filp, fl);
2055 return 0;
2056}
2057EXPORT_SYMBOL_GPL(vfs_test_lock);
2058
2059
2060
2061
2062
2063
2064
2065
/*
 * Translate a lock's fl_pid into a pid number as seen from @ns.
 * Returns -1 for OFD locks (which carry no meaningful pid), and
 * fl_pid unchanged for remote locks (fl_pid <= 0, assigned by a
 * lock manager rather than a local task).
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;

	/* fl_pid is recorded relative to init_pid_ns; map it into @ns. */
	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
2082
/* Convert an internal file_lock to the userspace struct flock layout. */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" (fl_end == OFFSET_MAX). */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
2103
#if BITS_PER_LONG == 32
/* As posix_lock_to_flock(), but for the 64-bit flock64 — cannot overflow. */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" (fl_end == OFFSET_MAX). */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
2115
2116
2117
2118
/* Report the first lock that would block the lock described by @flock.
 * Implements the F_GETLK and F_OFD_GETLK commands of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid to be zero on input. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK;
		fl->fl_flags |= FL_OFDLCK;
		/* OFD lock ownership is the open file description itself. */
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		/* A conflicting lock was found: describe it to userspace. */
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2194{
2195 if (filp->f_op->lock && is_remote_lock(filp))
2196 return filp->f_op->lock(filp, cmd, fl);
2197 else
2198 return posix_lock_file(filp, fl, conf);
2199}
2200EXPORT_SYMBOL_GPL(vfs_lock_file);
2201
/*
 * Apply @fl via vfs_lock_file(), sleeping until the lock is granted or
 * the wait is interrupted by a signal.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the blocked lock is woken. */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		/* Interrupted: unhook ourselves from the blocker list. */
		locks_delete_block(fl);
		break;
	}

	return error;
}
2225
2226
2227static int
2228check_fmode_for_setlk(struct file_lock *fl)
2229{
2230 switch (fl->fl_type) {
2231 case F_RDLCK:
2232 if (!(fl->fl_file->f_mode & FMODE_READ))
2233 return -EBADF;
2234 break;
2235 case F_WRLCK:
2236 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2237 return -EBADF;
2238 }
2239 return 0;
2240}
2241
2242
2243
2244
2245int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2246 struct flock *flock)
2247{
2248 struct file_lock *file_lock = locks_alloc_lock();
2249 struct inode *inode = locks_inode(filp);
2250 struct file *f;
2251 int error;
2252
2253 if (file_lock == NULL)
2254 return -ENOLCK;
2255
2256
2257
2258
2259 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2260 error = -EAGAIN;
2261 goto out;
2262 }
2263
2264 error = flock_to_posix_lock(filp, file_lock, flock);
2265 if (error)
2266 goto out;
2267
2268 error = check_fmode_for_setlk(file_lock);
2269 if (error)
2270 goto out;
2271
2272
2273
2274
2275
2276 switch (cmd) {
2277 case F_OFD_SETLK:
2278 error = -EINVAL;
2279 if (flock->l_pid != 0)
2280 goto out;
2281
2282 cmd = F_SETLK;
2283 file_lock->fl_flags |= FL_OFDLCK;
2284 file_lock->fl_owner = filp;
2285 break;
2286 case F_OFD_SETLKW:
2287 error = -EINVAL;
2288 if (flock->l_pid != 0)
2289 goto out;
2290
2291 cmd = F_SETLKW;
2292 file_lock->fl_flags |= FL_OFDLCK;
2293 file_lock->fl_owner = filp;
2294
2295 case F_SETLKW:
2296 file_lock->fl_flags |= FL_SLEEP;
2297 }
2298
2299 error = do_lock_file_wait(filp, cmd, file_lock);
2300
2301
2302
2303
2304
2305
2306 if (!error && file_lock->fl_type != F_UNLCK &&
2307 !(file_lock->fl_flags & FL_OFDLCK)) {
2308
2309
2310
2311
2312
2313 spin_lock(¤t->files->file_lock);
2314 f = fcheck(fd);
2315 spin_unlock(¤t->files->file_lock);
2316 if (f != filp) {
2317 file_lock->fl_type = F_UNLCK;
2318 error = do_lock_file_wait(filp, cmd, file_lock);
2319 WARN_ON_ONCE(error);
2320 error = -EBADF;
2321 }
2322 }
2323out:
2324 trace_fcntl_setlk(inode, file_lock, error);
2325 locks_free_lock(file_lock);
2326 return error;
2327}
2328
2329#if BITS_PER_LONG == 32
2330
2331
2332
/* Report the first lock that would block the lock described by @flock.
 * 64-bit (flock64) variant of fcntl_getlk(), compiled only on 32-bit.
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid to be zero on input. */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		/* OFD lock ownership is the open file description itself. */
		fl->fl_owner = filp;
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		/* A conflicting lock was found: describe it to userspace. */
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}
2372
2373
2374
2375
2376int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2377 struct flock64 *flock)
2378{
2379 struct file_lock *file_lock = locks_alloc_lock();
2380 struct inode *inode = locks_inode(filp);
2381 struct file *f;
2382 int error;
2383
2384 if (file_lock == NULL)
2385 return -ENOLCK;
2386
2387
2388
2389
2390 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2391 error = -EAGAIN;
2392 goto out;
2393 }
2394
2395 error = flock64_to_posix_lock(filp, file_lock, flock);
2396 if (error)
2397 goto out;
2398
2399 error = check_fmode_for_setlk(file_lock);
2400 if (error)
2401 goto out;
2402
2403
2404
2405
2406
2407 switch (cmd) {
2408 case F_OFD_SETLK:
2409 error = -EINVAL;
2410 if (flock->l_pid != 0)
2411 goto out;
2412
2413 cmd = F_SETLK64;
2414 file_lock->fl_flags |= FL_OFDLCK;
2415 file_lock->fl_owner = filp;
2416 break;
2417 case F_OFD_SETLKW:
2418 error = -EINVAL;
2419 if (flock->l_pid != 0)
2420 goto out;
2421
2422 cmd = F_SETLKW64;
2423 file_lock->fl_flags |= FL_OFDLCK;
2424 file_lock->fl_owner = filp;
2425
2426 case F_SETLKW64:
2427 file_lock->fl_flags |= FL_SLEEP;
2428 }
2429
2430 error = do_lock_file_wait(filp, cmd, file_lock);
2431
2432
2433
2434
2435
2436
2437 if (!error && file_lock->fl_type != F_UNLCK &&
2438 !(file_lock->fl_flags & FL_OFDLCK)) {
2439
2440
2441
2442
2443
2444 spin_lock(¤t->files->file_lock);
2445 f = fcheck(fd);
2446 spin_unlock(¤t->files->file_lock);
2447 if (f != filp) {
2448 file_lock->fl_type = F_UNLCK;
2449 error = do_lock_file_wait(filp, cmd, file_lock);
2450 WARN_ON_ONCE(error);
2451 error = -EBADF;
2452 }
2453 }
2454out:
2455 locks_free_lock(file_lock);
2456 return error;
2457}
2458#endif
2459
2460
2461
2462
2463
2464
/*
 * This function is called when the file is being removed
 * from the task's fd array.  POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	/* Build a whole-file F_UNLCK request on the stack. */
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* The filesystem's lock method may have attached private state. */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}

EXPORT_SYMBOL(locks_remove_posix);
2499
2500
/* The i_flctx must be valid when calling into here. */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	/* Whole-file F_UNLCK request for this filp's flock lock. */
	struct file_lock fl = {
		.fl_owner = filp,
		.fl_pid = current->tgid,
		.fl_file = filp,
		.fl_flags = FL_FLOCK | FL_CLOSE,
		.fl_type = F_UNLCK,
		.fl_end = OFFSET_MAX,
	};
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	if (filp->f_op->flock && is_remote_lock(filp))
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	/* The lock method may have attached private state to free. */
	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
2525
2526
/* The i_flctx must be valid when calling into here. */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* Unwind every lease this filp still holds. */
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	/* Free the removed leases outside the spinlock. */
	locks_dispose_list(&dispose);
}
2546
2547
2548
2549
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	/* Sanity check: warn about any locks left behind for this filp. */
	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
2573
2574
2575
2576
2577
2578
2579
/**
 *	posix_unblock_lock - stop waiting for a file lock
 *	@waiter: the lock which was waiting
 *
 *	lockd needs to block waiting for locks.
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = 0;

	spin_lock(&blocked_lock_lock);
	/* fl_next is non-NULL while the waiter is still on a blocker list. */
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
2594
2595
2596
2597
2598
2599
2600
2601
2602int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2603{
2604 if (filp->f_op->lock && is_remote_lock(filp))
2605 return filp->f_op->lock(filp, F_CANCELLK, fl);
2606 return 0;
2607}
2608
2609EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2610
2611#ifdef CONFIG_PROC_FS
2612#include <linux/proc_fs.h>
2613#include <linux/seq_file.h>
2614
/* Per-open iterator state for the /proc/locks seq_file walk. */
struct locks_iterator {
	int	li_cpu;		/* current CPU in the percpu hlist walk */
	loff_t	li_pos;		/* 1-based position shown as the lock id */
};
2619
/* Format one lock as a /proc/locks line (prefixed with @pfx for blockers). */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			    loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	/*
	 * NOTE(review): locks_translate_pid() returns pid_t and may be -1
	 * for OFD locks; stored here as unsigned int and printed with %d.
	 */
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl_pid = locks_translate_pid(fl, proc_pidns);
	/*
	 * If there isn't a fl_pid don't display who is waiting on
	 * the lock if we are called from locks_show, or if we are
	 * called from __show_fd_info - skip lock entirely
	 */
	if (fl_pid == 0)
		return;

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
2700
/* seq_file ->show: print one lock plus all lock requests blocked on it. */
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl = hlist_entry(v, struct file_lock, fl_link);

	/* Skip locks whose owner is not visible in this pid namespace. */
	if (locks_translate_pid(fl, proc_pidns) == 0)
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	/* Blocked waiters are shown indented with a "->" prefix. */
	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}
2719
2720static void __show_fd_locks(struct seq_file *f,
2721 struct list_head *head, int *id,
2722 struct file *filp, struct files_struct *files)
2723{
2724 struct file_lock *fl;
2725
2726 list_for_each_entry(fl, head, fl_list) {
2727
2728 if (filp != fl->fl_file)
2729 continue;
2730 if (fl->fl_owner != files &&
2731 fl->fl_owner != filp)
2732 continue;
2733
2734 (*id)++;
2735 seq_puts(f, "lock:\t");
2736 lock_get_status(f, fl, *id, "");
2737 }
2738}
2739
/* Show all locks held on @filp for /proc/<pid>/fdinfo/<fd>. */
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
2757
/*
 * seq_file ->start for /proc/locks. Takes file_rwsem for write to freeze
 * the per-cpu lock lists, and blocked_lock_lock to stabilize the blocker
 * lists that locks_show() walks; both are dropped in locks_stop().
 */
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	/* li_pos is 1-based (used as the displayed lock id). */
	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}
2768
/* seq_file ->next: advance across the per-cpu file_lock_list hlists. */
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}
2776
/* seq_file ->stop: release the locks taken in locks_start(), inner first. */
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
2783
/* seq_file iteration callbacks for /proc/locks. */
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2790
/* open() for /proc/locks: allocate the per-open locks_iterator. */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}
2796
/* file_operations for the /proc/locks entry. */
static const struct file_operations proc_locks_operations = {
	.open		= locks_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
2803
/* Register /proc/locks at fs-initcall time. */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
fs_initcall(proc_locks_init);
2810#endif
2811
/*
 * Boot-time initialization: create the slab caches for lock contexts and
 * file_locks (SLAB_PANIC: boot fails on allocation failure), and set up
 * the per-cpu global lock lists.
 */
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);


	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	return 0;
}

core_initcall(filelock_init);
2834