1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158#include <linux/capability.h>
159#include <linux/file.h>
160#include <linux/fdtable.h>
161#include <linux/fs.h>
162#include <linux/init.h>
163#include <linux/security.h>
164#include <linux/slab.h>
165#include <linux/syscalls.h>
166#include <linux/time.h>
167#include <linux/rcupdate.h>
168#include <linux/pid_namespace.h>
169#include <linux/hashtable.h>
170#include <linux/percpu.h>
171
172#define CREATE_TRACE_POINTS
173#include <trace/events/filelock.h>
174
175#include <linux/uaccess.h>
176
/* Classify a file_lock by its flags / owner. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

/* True if a break of this lease is currently pending (unlock or downgrade). */
static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

/* Lease type the lock will have once any pending break completes. */
static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}
196
/* Sysctl tunables: whether leases are granted, and the break grace period
 * in seconds before a lease is forcibly revoked. */
int leases_enable = 1;
int lease_break_time = 45;

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock
 * is held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
214
215
216
217
218
219
220
221
222
223
224
225
/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is keyed on the lock owner (see posix_owner_key()).
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash and the fl_blocker/fl_blocked_*
 * fields of blocked file_locks.  Lock ordering: where both are needed,
 * the flc_lock is always taken before blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

/* Slab caches for lock contexts and file_lock structures. */
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
246
/*
 * Return the lock context for @inode, allocating and installing one if
 * needed.  For F_UNLCK requests no context is allocated: if none exists,
 * there is nothing to unlock.  Returns NULL on allocation failure (or for
 * F_UNLCK with no existing context).
 */
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's still NULL.  If a racing task installed
	 * a context first, free ours and use theirs instead.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
278
279static void
280locks_dump_ctx_list(struct list_head *list, char *list_type)
281{
282 struct file_lock *fl;
283
284 list_for_each_entry(fl, list, fl_list) {
285 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
286 }
287}
288
/*
 * Sanity check on inode teardown: warn (and dump) if any locks are still
 * hanging off the inode's lock context — they would be leaked.
 */
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}

/*
 * Sanity check on final fput(): warn about any lock on @list still owned
 * by @filp — such locks should have been released before the last close.
 */
static void
locks_check_ctx_file_list(struct file *filp, struct list_head *list,
			  char *list_type)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);

	list_for_each_entry(fl, list, fl_list)
		if (fl->fl_file == filp)
			pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
				" fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
				list_type, MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino,
				fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
}
321
/*
 * Free the inode's lock context (called when the inode is being destroyed).
 * All lock lists are expected to be empty by now; warn if they are not.
 */
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
332
/* Initialize the list heads and wait queue embedded in a file_lock. */
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_blocked_requests);
	INIT_LIST_HEAD(&fl->fl_blocked_member);
	init_waitqueue_head(&fl->fl_wait);
}
341
342
343struct file_lock *locks_alloc_lock(void)
344{
345 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
346
347 if (fl)
348 locks_init_lock_heads(fl);
349
350 return fl;
351}
352EXPORT_SYMBOL_GPL(locks_alloc_lock);
353
/*
 * Release the private state attached to a file_lock: the filesystem's
 * fl_ops private data and the lock manager's owner reference.  The lock
 * must already be off every list/queue — the BUG_ONs enforce that.
 */
void locks_release_private(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_blocked_requests));
	BUG_ON(!list_empty(&fl->fl_blocked_member));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);
377
378
/* Release a file_lock's private state and return it to the slab cache. */
void locks_free_lock(struct file_lock *fl)
{
	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
385
386static void
387locks_dispose_list(struct list_head *dispose)
388{
389 struct file_lock *fl;
390
391 while (!list_empty(dispose)) {
392 fl = list_first_entry(dispose, struct file_lock, fl_list);
393 list_del_init(&fl->fl_list);
394 locks_free_lock(fl);
395 }
396}
397
/* Reinitialize a caller-provided (possibly stack) file_lock to a clean state. */
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
404
405
406
407
/*
 * Initialize a new lock from an existing file_lock structure, copying only
 * the fields needed to report a conflicting lock (no fl_file, no fl_ops).
 * Takes an owner reference via lm_get_owner when the lock manager has one.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);
426
/*
 * Full copy of @fl into @new, including fl_file and fl_ops, giving the
 * filesystem's fl_copy_lock callback a chance to copy its private data.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}
EXPORT_SYMBOL(locks_copy_lock);
443
/*
 * Move all requests blocked on @fl over to @new, repointing their
 * fl_blocker.  Used when a pending request is being replaced by a copy.
 */
static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
{
	struct file_lock *f;

	/*
	 * As ctx->flc_lock is held, new requests cannot be added to
	 * ->fl_blocked_requests, so we don't need a lock to check if it
	 * is empty.
	 */
	if (list_empty(&fl->fl_blocked_requests))
		return;
	spin_lock(&blocked_lock_lock);
	list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
	list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
		f->fl_blocker = new;
	spin_unlock(&blocked_lock_lock);
}
461
462static inline int flock_translate_cmd(int cmd) {
463 if (cmd & LOCK_MAND)
464 return cmd & (LOCK_MAND | LOCK_RW);
465 switch (cmd) {
466 case LOCK_SH:
467 return F_RDLCK;
468 case LOCK_EX:
469 return F_WRLCK;
470 case LOCK_UN:
471 return F_UNLCK;
472 }
473 return -EINVAL;
474}
475
476
/*
 * Fill in (or allocate, when @fl is NULL) a file_lock describing an
 * flock(2)-style whole-file lock for @filp.  The struct file itself is
 * the owner.  Returns ERR_PTR on bad @cmd or allocation failure.
 */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	if (fl == NULL) {
		fl = locks_alloc_lock();
		if (fl == NULL)
			return ERR_PTR(-ENOMEM);
	} else {
		locks_init_lock(fl);
	}

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;	/* whole file */

	return fl;
}
502
503static int assign_type(struct file_lock *fl, long type)
504{
505 switch (type) {
506 case F_RDLCK:
507 case F_WRLCK:
508 case F_UNLCK:
509 fl->fl_type = type;
510 break;
511 default:
512 return -EINVAL;
513 }
514 return 0;
515}
516
/*
 * Translate a userspace struct flock64 into a kernel file_lock, resolving
 * l_whence against the file position / size and converting the (start,len)
 * pair into an inclusive [fl_start, fl_end] range with overflow checks.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		/* negative length: the range *ends* just before fl_start */
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;	/* l_len == 0: to end of file */

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
563
564
565
566
/*
 * Verify a call to fcntl() and determine the lock range: widen the 32-bit
 * struct flock to a struct flock64 and delegate to flock64_to_posix_lock().
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	struct flock64 ll = {
		.l_type = l->l_type,
		.l_whence = l->l_whence,
		.l_start = l->l_start,
		.l_len = l->l_len,
	};

	return flock64_to_posix_lock(filp, fl, &ll);
}
579
580
/* default lease lm_break callback: signal the holder via SIGIO/POLL_MSG.
 * Returning false keeps the lease on the list while the break is pending. */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}

/* default lease lm_setup callback: hook up fasync notification and make
 * the current task the SIGIO recipient for the file. */
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was no
	 * old entry, then it used "priv" and inserted it into the fasync list.
	 * Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}

/* Lock-manager operations for ordinary fcntl(F_SETLEASE) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
610
611
612
613
/*
 * Initialize a lease on @filp covering the whole file, owned by @filp
 * itself and managed by lease_manager_ops.  -EINVAL on bad @type.
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;	/* leases always cover the whole file */
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
630
631
632static struct file_lock *lease_alloc(struct file *filp, long type)
633{
634 struct file_lock *fl = locks_alloc_lock();
635 int error = -ENOMEM;
636
637 if (fl == NULL)
638 return ERR_PTR(error);
639
640 error = lease_init(filp, type, fl);
641 if (error) {
642 locks_free_lock(fl);
643 return ERR_PTR(error);
644 }
645 return fl;
646}
647
648
649
/* Check if two byte ranges overlap (both ends are inclusive). */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}

/*
 * Check whether two locks have the same owner.  For POSIX/OFD locks the
 * owner pointer (files_struct or struct file) identifies the owner.
 */
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner;
}
663
664
/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	/* file_rwsem (read side) pins us to this CPU's list */
	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	/* remember which CPU's list we went on, for the later delete */
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}

/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking lock if already unhashed. This is safe since this check
	 * is done while holding the flc_lock, and new insertions into the list
	 * also require that it be held.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}

/* Hash key for the blocked_hash: the lock owner's pointer value. */
static unsigned long
posix_owner_key(struct file_lock *fl)
{
	return (unsigned long)fl->fl_owner;
}
703
/* Add @waiter to the global blocked_hash used for deadlock detection. */
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}

/* Remove @waiter from the global blocked_hash. */
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
717
718
719
720
721
722
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 *
 * Must be called with blocked_lock_lock held.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_blocked_member);
	waiter->fl_blocker = NULL;
}

/*
 * Detach every request blocked on @blocker and wake it up (or invoke its
 * lock manager's lm_notify).  Must be called with blocked_lock_lock held.
 */
static void __locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_blocked_requests)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_blocked_requests,
					  struct file_lock, fl_blocked_member);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
744
745
746
747
748
749
750
/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * lockd/nfsd need to disconnect the lock while working on it.
 * Returns 0 if @waiter was actually blocked, -ENOENT otherwise.
 */
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;

	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 * Because fl_blocked_requests is also checked (it could be non-empty
	 * if this waiter briefly became a blocker itself), we can avoid
	 * taking blocked_lock_lock in the common fast path where neither
	 * holds — no one else can touch this waiter any more.
	 */
	if (waiter->fl_blocker == NULL &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;
	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 *
 * Must be called with both the flc_lock and blocked_lock_lock held. The
 * fl_blocked_requests list itself is protected by the blocked_lock_lock,
 * but by ensuring that the flc_lock is also held on insertions we can avoid
 * taking the blocked_lock_lock in some cases when we see that the
 * fl_blocked_requests list is empty.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter,
				 bool conflict(struct file_lock *,
					       struct file_lock *))
{
	struct file_lock *fl;
	BUG_ON(!list_empty(&waiter->fl_blocked_member));

new_blocker:
	/* walk down the tree of blocked requests to find a real blocker */
	list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
		if (conflict(fl, waiter)) {
			blocker =  fl;
			goto new_blocker;
		}
	waiter->fl_blocker = blocker;
	list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);

	/* The requests in waiter->fl_blocked are known to conflict with
	 * waiter, but might not conflict with blocker, or the requests
	 * and lock which block it.  So they all need to be woken.
	 */
	__locks_wake_up_blocks(waiter);
}

/* Same as above, but takes the blocked_lock_lock itself. */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter,
			       bool conflict(struct file_lock *,
					     struct file_lock *))
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter, conflict);
	spin_unlock(&blocked_lock_lock);
}
830
831
832
833
834
835
/*
 * Wake up processes blocked waiting for blocker.
 *
 * Must be called with the inode->flc_lock held!
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_blocked_requests list does not require the flc_lock, so we must
	 * recheck list_empty() after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_blocked_requests))
		return;

	spin_lock(&blocked_lock_lock);
	__locks_wake_up_blocks(blocker);
	spin_unlock(&blocked_lock_lock);
}
852
/* Link @fl into a context list just before @before, and onto the
 * global /proc/locks list.  Caller holds the flc_lock. */
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}

/* Unlink @fl from its context and the global list, waking any waiters.
 * Caller holds the flc_lock. */
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	locks_wake_up_blocks(fl);
}

/* Unlink @fl and either park it on @dispose for deferred freeing (the
 * flc_lock is still held) or free it immediately when @dispose is NULL. */
static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
877
878
879
880
881static bool locks_conflict(struct file_lock *caller_fl,
882 struct file_lock *sys_fl)
883{
884 if (sys_fl->fl_type == F_WRLCK)
885 return true;
886 if (caller_fl->fl_type == F_WRLCK)
887 return true;
888 return false;
889}
890
891
892
893
/*
 * Determine whether a POSIX request conflicts with an existing POSIX lock:
 * different owner, overlapping range, and incompatible read/write types.
 */
static bool posix_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (posix_same_owner(caller_fl, sys_fl))
		return false;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}

/*
 * Determine whether an flock request conflicts with an existing flock lock:
 * different struct file, no LOCK_MAND bits, incompatible types.
 */
static bool flock_locks_conflict(struct file_lock *caller_fl,
				 struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (caller_fl->fl_file == sys_fl->fl_file)
		return false;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return false;

	return locks_conflict(caller_fl, sys_fl);
}
926
/*
 * Test for a conflicting POSIX lock.  On return, @fl either describes the
 * first conflicting lock found (via locks_copy_conflock) or has
 * fl_type == F_UNLCK when there is no conflict.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
/* Cap on deadlock-search depth so a long (or corrupted) chain of waiters
 * cannot stall us; beyond this we simply assume no deadlock. */
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl)) {
			/* follow the chain to the ultimate blocker */
			while (fl->fl_blocker)
				fl = fl->fl_blocker;
			return fl;
		}
	}
	return NULL;
}
1003
1004
/* Must be called with the blocked_lock_lock held!
 *
 * Walk the chain of owners each waiting on some other owner's lock; if we
 * get back to @caller_fl's owner, granting the wait would deadlock.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* give up: assume no deadlock */
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
1027
1028
1029
1030
1031
1032
1033
1034
/*
 * Try to create an flock-style (whole-file, per-struct-file) lock on an
 * inode.  A file may hold at most one such lock, so an existing lock on
 * the same file is first removed (lock conversion).  F_UNLCK removes it
 * outright.  Returns FILE_LOCK_DEFERRED when blocked with FL_SLEEP set.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	/* pre-allocate before taking the spinlock; not needed for unlock
	 * or pure access checks */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* remove this file's existing flock lock, if any (conversion) */
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;	/* already held in this mode */
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request, flock_locks_conflict);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_move_blocks(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	trace_flock_lock_inode(inode, request, error);
	return error;
}
1106
/*
 * Apply a POSIX byte-range lock request to an inode: detect conflicts
 * (optionally copying the conflicting lock into @conflock), then merge,
 * split or replace the owner's existing locks as needed.  Returns
 * FILE_LOCK_DEFERRED when the request must wait (FL_SLEEP).
 */
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure that no new locks will be needed:
	 * a pure access check, or a whole-file unlock.
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			/*
			 * Ensure that we don't find any locks blocked on this
			 * request during deadlock detection.
			 */
			__locks_wake_up_blocks(request);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request,
						     posix_locks_conflict);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock. */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type) */
		if (request->fl_type == fl->fl_type) {
			/* In all comparisons of start vs end, use
			 * "start - 1" rather than "end + 1". If end
			 * is OFFSET_MAX, end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_move_blocks(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.  We merge adjacent & overlapping
 * locks, and on unlock we split a lock that straddles the boundary into
 * two.  Thin wrapper resolving the file to its inode.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1350
1351
1352
1353
1354
1355
1356
1357
/**
 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
 * @inode: inode of file to which lock request should be applied
 * @fl: The lock to be applied
 *
 * Apply a POSIX style lock request to an inode, sleeping (interruptibly)
 * as long as the request is deferred behind a conflicting lock.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_blocker is cleared when the blocker goes away */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	/* make sure we're off the blocked list (e.g. after a signal) */
	locks_delete_block(fl);
	return error;
}
1373
#ifdef CONFIG_MANDATORY_FILE_LOCKING
/**
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's POSIX lock list for any lock held by another
 * owner.  Returns 0 when access is allowed, -EAGAIN when some other
 * owner holds a lock on the file.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}

/**
 * locks_mandatory_area - Check for a conflicting lock
 * @inode:	the file to check
 * @filp:	how the file was opened (if it was)
 * @start:	first byte in the file to check
 * @end:	last byte in the file to check
 * @type:	%F_WRLCK for a write check, %F_RDLCK for a read check
 *
 * Searches the range for any lock that would conflict with an I/O of the
 * given @type, sleeping for it (unless the file is O_NONBLOCK) when one
 * is found.  Tries both OFD-style (owner == @filp) and classic
 * (owner == current->files) ownership before giving up.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;	/* a check, not a real lock */
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			/* first try with OFD-style ownership, non-blocking */
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		break;
	}
	locks_delete_block(&fl);

	return error;
}
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */
1469
/* Clear the pending-break flags that are satisfied by changing the lease
 * to @arg: an unlock satisfies both, a downgrade only the downgrade. */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1480
1481
/* We already had a lease on this file; just change its type */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		/* tear down SIGIO delivery for the departing lease holder */
		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
1505
1506static bool past_time(unsigned long then)
1507{
1508 if (!then)
1509
1510 return false;
1511 return time_after(jiffies, then);
1512}
1513
/*
 * Downgrade or remove any lease whose break grace period has expired.
 * Must be called with the ctx->flc_lock held.
 */
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
1529
/*
 * Would opening the file as @breaker describes conflict with @lease?
 * Layout leases only conflict with other layout operations, and a
 * delegation-breaking open does not conflict with an ordinary lease.
 */
static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
	bool rc;

	if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
		rc = false;
		goto trace;
	}
	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
		rc = false;
		goto trace;
	}

	rc = locks_conflict(breaker, lease);
trace:
	trace_leases_conflict(rc, lease, breaker);
	return rc;
}

/* Does any lease on the inode conflict with @breaker?
 * Must be called with the ctx->flc_lock held. */
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1578{
1579 int error = 0;
1580 struct file_lock_context *ctx;
1581 struct file_lock *new_fl, *fl, *tmp;
1582 unsigned long break_time;
1583 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1584 LIST_HEAD(dispose);
1585
1586 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1587 if (IS_ERR(new_fl))
1588 return PTR_ERR(new_fl);
1589 new_fl->fl_flags = type;
1590
1591
1592 ctx = smp_load_acquire(&inode->i_flctx);
1593 if (!ctx) {
1594 WARN_ON_ONCE(1);
1595 return error;
1596 }
1597
1598 percpu_down_read(&file_rwsem);
1599 spin_lock(&ctx->flc_lock);
1600
1601 time_out_leases(inode, &dispose);
1602
1603 if (!any_leases_conflict(inode, new_fl))
1604 goto out;
1605
1606 break_time = 0;
1607 if (lease_break_time > 0) {
1608 break_time = jiffies + lease_break_time * HZ;
1609 if (break_time == 0)
1610 break_time++;
1611 }
1612
1613 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1614 if (!leases_conflict(fl, new_fl))
1615 continue;
1616 if (want_write) {
1617 if (fl->fl_flags & FL_UNLOCK_PENDING)
1618 continue;
1619 fl->fl_flags |= FL_UNLOCK_PENDING;
1620 fl->fl_break_time = break_time;
1621 } else {
1622 if (lease_breaking(fl))
1623 continue;
1624 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1625 fl->fl_downgrade_time = break_time;
1626 }
1627 if (fl->fl_lmops->lm_break(fl))
1628 locks_delete_lock_ctx(fl, &dispose);
1629 }
1630
1631 if (list_empty(&ctx->flc_lease))
1632 goto out;
1633
1634 if (mode & O_NONBLOCK) {
1635 trace_break_lease_noblock(inode, new_fl);
1636 error = -EWOULDBLOCK;
1637 goto out;
1638 }
1639
1640restart:
1641 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1642 break_time = fl->fl_break_time;
1643 if (break_time != 0)
1644 break_time -= jiffies;
1645 if (break_time == 0)
1646 break_time++;
1647 locks_insert_block(fl, new_fl, leases_conflict);
1648 trace_break_lease_block(inode, new_fl);
1649 spin_unlock(&ctx->flc_lock);
1650 percpu_up_read(&file_rwsem);
1651
1652 locks_dispose_list(&dispose);
1653 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1654 !new_fl->fl_blocker, break_time);
1655
1656 percpu_down_read(&file_rwsem);
1657 spin_lock(&ctx->flc_lock);
1658 trace_break_lease_unblock(inode, new_fl);
1659 locks_delete_block(new_fl);
1660 if (error >= 0) {
1661
1662
1663
1664
1665 if (error == 0)
1666 time_out_leases(inode, &dispose);
1667 if (any_leases_conflict(inode, new_fl))
1668 goto restart;
1669 error = 0;
1670 }
1671out:
1672 spin_unlock(&ctx->flc_lock);
1673 percpu_up_read(&file_rwsem);
1674 locks_dispose_list(&dispose);
1675 locks_free_lock(new_fl);
1676 return error;
1677}
1678EXPORT_SYMBOL(__break_lease);
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1690{
1691 bool has_lease = false;
1692 struct file_lock_context *ctx;
1693 struct file_lock *fl;
1694
1695 ctx = smp_load_acquire(&inode->i_flctx);
1696 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1697 spin_lock(&ctx->flc_lock);
1698 fl = list_first_entry_or_null(&ctx->flc_lease,
1699 struct file_lock, fl_list);
1700 if (fl && (fl->fl_type == F_WRLCK))
1701 has_lease = true;
1702 spin_unlock(&ctx->flc_lock);
1703 }
1704
1705 if (has_lease)
1706 *time = current_time(inode);
1707}
1708EXPORT_SYMBOL(lease_get_mtime);
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 * %F_WRLCK to indicate an exclusive lease is held.
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *		changed to a shared lease (or removed).
 * %F_UNLCK to indicate the lease needs to be removed.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		/* expire any stale leases before reporting */
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771static int
1772check_conflicting_open(struct file *filp, const long arg, int flags)
1773{
1774 struct inode *inode = locks_inode(filp);
1775 int self_wcount = 0, self_rcount = 0;
1776
1777 if (flags & FL_LAYOUT)
1778 return 0;
1779
1780 if (arg == F_RDLCK)
1781 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1782 else if (arg != F_WRLCK)
1783 return 0;
1784
1785
1786
1787
1788
1789
1790
1791 if (filp->f_mode & FMODE_WRITE)
1792 self_wcount = 1;
1793 else if (filp->f_mode & FMODE_READ)
1794 self_rcount = 1;
1795
1796 if (atomic_read(&inode->i_writecount) != self_wcount ||
1797 atomic_read(&inode->i_readcount) != self_rcount)
1798 return -EAGAIN;
1799
1800 return 0;
1801}
1802
/*
 * Attempt to set up a new lease (or delegation) on @filp, or modify this
 * owner's existing one.  On success the lease in *flp is consumed (linked
 * into the inode's lease list) and *flp is set to NULL.
 */
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with a number of
	 * operations that take the i_rwsem.  We trylock because delegations
	 * are an optional optimization: if there's some chance of a conflict
	 * we'd rather not bother.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* Write delegations are not currently supported: */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive lease on this
	 * file, then we hold it on this filp (otherwise our open of this
	 * file would have blocked).  And if we are trying to acquire an
	 * exclusive lease, then the file is not open by anyone (including
	 * us) except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is in the process of breaking
		 * theirs:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	if (my_fl != NULL) {
		/* modify the existing lease in place */
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * The barrier ensures that the insertion of the lock precedes these
	 * checks.
	 */
	smp_mb();
	error = check_conflicting_open(filp, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	if (!error && !my_fl)
		*flp = NULL;	/* lease was consumed; caller must not free it */
	return error;
}
1918
1919static int generic_delete_lease(struct file *filp, void *owner)
1920{
1921 int error = -EAGAIN;
1922 struct file_lock *fl, *victim = NULL;
1923 struct inode *inode = locks_inode(filp);
1924 struct file_lock_context *ctx;
1925 LIST_HEAD(dispose);
1926
1927 ctx = smp_load_acquire(&inode->i_flctx);
1928 if (!ctx) {
1929 trace_generic_delete_lease(inode, NULL);
1930 return error;
1931 }
1932
1933 percpu_down_read(&file_rwsem);
1934 spin_lock(&ctx->flc_lock);
1935 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1936 if (fl->fl_file == filp &&
1937 fl->fl_owner == owner) {
1938 victim = fl;
1939 break;
1940 }
1941 }
1942 trace_generic_delete_lease(inode, victim);
1943 if (victim)
1944 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1945 spin_unlock(&ctx->flc_lock);
1946 percpu_up_read(&file_rwsem);
1947 locks_dispose_list(&dispose);
1948 return error;
1949}
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1963 void **priv)
1964{
1965 struct inode *inode = locks_inode(filp);
1966 int error;
1967
1968 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1969 return -EACCES;
1970 if (!S_ISREG(inode->i_mode))
1971 return -EINVAL;
1972 error = security_file_lock(filp, arg);
1973 if (error)
1974 return error;
1975
1976 switch (arg) {
1977 case F_UNLCK:
1978 return generic_delete_lease(filp, *priv);
1979 case F_RDLCK:
1980 case F_WRLCK:
1981 if (!(*flp)->fl_lmops->lm_break) {
1982 WARN_ON_ONCE(1);
1983 return -ENOLCK;
1984 }
1985
1986 return generic_add_lease(filp, arg, flp, priv);
1987 default:
1988 return -EINVAL;
1989 }
1990}
1991EXPORT_SYMBOL(generic_setlease);
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010int
2011vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
2012{
2013 if (filp->f_op->setlease)
2014 return filp->f_op->setlease(filp, arg, lease, priv);
2015 else
2016 return generic_setlease(filp, arg, lease, priv);
2017}
2018EXPORT_SYMBOL_GPL(vfs_setlease);
2019
/*
 * Allocate a lease plus a fasync entry (so SIGIO can be sent to @fd when
 * the lease needs breaking) and hand both to vfs_setlease().  On success
 * the callee consumes them and clears the pointers (generic_add_lease()
 * sets *flp to NULL; lm_setup takes over the fasync entry), so only
 * whatever is left non-NULL is freed here.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	/* free anything the callee did not consume */
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2056{
2057 if (arg == F_UNLCK)
2058 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2059 return do_fcntl_add_lease(fd, filp, arg);
2060}
2061
2062
2063
2064
2065
2066
2067
2068
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode, sleeping (interruptibly)
 * whenever the request is deferred behind a conflicting lock.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* wait until the blocker releases us, then retry */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	/* always unlink from the blocked list, even on signal */
	locks_delete_block(fl);
	return error;
}
2084
2085
2086
2087
2088
2089
2090
2091
2092int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2093{
2094 int res = 0;
2095 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2096 case FL_POSIX:
2097 res = posix_lock_inode_wait(inode, fl);
2098 break;
2099 case FL_FLOCK:
2100 res = flock_lock_inode_wait(inode, fl);
2101 break;
2102 default:
2103 BUG();
2104 }
2105 return res;
2106}
2107EXPORT_SYMBOL(locks_lock_inode_wait);
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
/**
 *	sys_flock: - flock() system call.
 *	@fd: the file descriptor to lock.
 *	@cmd: the type of lock to apply.
 *
 *	Apply a %FL_FLOCK style lock to an open file descriptor.
 *	The @cmd can be one of:
 *
 *	- %LOCK_SH -- a shared lock.
 *	- %LOCK_EX -- an exclusive lock.
 *	- %LOCK_UN -- remove an existing lock.
 *	- %LOCK_MAND -- a 'mandatory' flock, combinable with %LOCK_READ /
 *	  %LOCK_WRITE.
 *
 *	Any of these may be or'd with %LOCK_NB for a non-blocking attempt.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/* locking (other than LOCK_MAND) requires an open read or write fd */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd, NULL);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	/* filesystems may override flock handling (e.g. network fs) */
	if (f.file->f_op->flock)
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182int vfs_test_lock(struct file *filp, struct file_lock *fl)
2183{
2184 if (filp->f_op->lock)
2185 return filp->f_op->lock(filp, F_GETLK, fl);
2186 posix_test_lock(filp, fl);
2187 return 0;
2188}
2189EXPORT_SYMBOL_GPL(vfs_test_lock);
2190
2191
2192
2193
2194
2195
2196
2197
/*
 * Translate a lock's fl_pid into a pid number meaningful in namespace @ns.
 * OFD locks have no owning process, so -1 is reported; remote (<= 0) pids
 * from lock managers are passed through unchanged.
 */
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
	pid_t vnr;
	struct pid *pid;

	if (IS_OFDLCK(fl))
		return -1;
	if (IS_REMOTELCK(fl))
		return fl->fl_pid;
	/*
	 * If the flock owner process is dead and its pid has been already
	 * freed, the translation below won't work, but we still want to show
	 * the flock owner pid number in the init pidns.
	 */
	if (ns == &init_pid_ns)
		return (pid_t)fl->fl_pid;

	/* translate init-ns pid to the caller's namespace under RCU */
	rcu_read_lock();
	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
	vnr = pid_nr_ns(pid, ns);
	rcu_read_unlock();
	return vnr;
}
2221
/*
 * Fill a userspace struct flock from an in-kernel file_lock, translating
 * the owner pid into the caller's pid namespace.  Returns -EOVERFLOW on
 * 32-bit when the range cannot be represented in the legacy offsets.
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
2242
#if BITS_PER_LONG == 32
/*
 * 64-bit variant of posix_lock_to_flock(); never overflows, so it has no
 * return value.  Only needed on 32-bit where struct flock64 differs.
 */
static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
{
	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
}
#endif
2254
2255
2256
2257
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;
	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid == 0 on input */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;	/* OFD locks are owned by the open file */
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK) {
		/* may fail with -EOVERFLOW on 32-bit */
		error = posix_lock_to_flock(flock, fl);
		if (error)
			goto out;
	}
out:
	locks_free_lock(fl);
	return error;
}
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2333{
2334 if (filp->f_op->lock)
2335 return filp->f_op->lock(filp, cmd, fl);
2336 else
2337 return posix_lock_file(filp, fl, conf);
2338}
2339EXPORT_SYMBOL_GPL(vfs_lock_file);
2340
/*
 * Apply @fl via vfs_lock_file(), sleeping (interruptibly) whenever the
 * request is deferred behind a conflicting lock.  Runs the LSM file-lock
 * check first.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			     struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* wait until the blocker releases us, then retry */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
		if (error)
			break;
	}
	/* always unlink from the blocked list, even on signal */
	locks_delete_block(fl);

	return error;
}
2362
2363
2364static int
2365check_fmode_for_setlk(struct file_lock *fl)
2366{
2367 switch (fl->fl_type) {
2368 case F_RDLCK:
2369 if (!(fl->fl_file->f_mode & FMODE_READ))
2370 return -EBADF;
2371 break;
2372 case F_WRLCK:
2373 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2374 return -EBADF;
2375 }
2376 return 0;
2377}
2378
2379
2380
2381
2382int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2383 struct flock *flock)
2384{
2385 struct file_lock *file_lock = locks_alloc_lock();
2386 struct inode *inode = locks_inode(filp);
2387 struct file *f;
2388 int error;
2389
2390 if (file_lock == NULL)
2391 return -ENOLCK;
2392
2393
2394
2395
2396 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2397 error = -EAGAIN;
2398 goto out;
2399 }
2400
2401 error = flock_to_posix_lock(filp, file_lock, flock);
2402 if (error)
2403 goto out;
2404
2405 error = check_fmode_for_setlk(file_lock);
2406 if (error)
2407 goto out;
2408
2409
2410
2411
2412
2413 switch (cmd) {
2414 case F_OFD_SETLK:
2415 error = -EINVAL;
2416 if (flock->l_pid != 0)
2417 goto out;
2418
2419 cmd = F_SETLK;
2420 file_lock->fl_flags |= FL_OFDLCK;
2421 file_lock->fl_owner = filp;
2422 break;
2423 case F_OFD_SETLKW:
2424 error = -EINVAL;
2425 if (flock->l_pid != 0)
2426 goto out;
2427
2428 cmd = F_SETLKW;
2429 file_lock->fl_flags |= FL_OFDLCK;
2430 file_lock->fl_owner = filp;
2431
2432 case F_SETLKW:
2433 file_lock->fl_flags |= FL_SLEEP;
2434 }
2435
2436 error = do_lock_file_wait(filp, cmd, file_lock);
2437
2438
2439
2440
2441
2442
2443 if (!error && file_lock->fl_type != F_UNLCK &&
2444 !(file_lock->fl_flags & FL_OFDLCK)) {
2445
2446
2447
2448
2449
2450 spin_lock(¤t->files->file_lock);
2451 f = fcheck(fd);
2452 spin_unlock(¤t->files->file_lock);
2453 if (f != filp) {
2454 file_lock->fl_type = F_UNLCK;
2455 error = do_lock_file_wait(filp, cmd, file_lock);
2456 WARN_ON_ONCE(error);
2457 error = -EBADF;
2458 }
2459 }
2460out:
2461 trace_fcntl_setlk(inode, file_lock, error);
2462 locks_free_lock(file_lock);
2463 return error;
2464}
2465
2466#if BITS_PER_LONG == 32
2467
2468
2469
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK64 command of fcntl() on 32-bit kernels.
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
{
	struct file_lock *fl;
	int error;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	error = -EINVAL;
	if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
		goto out;

	error = flock64_to_posix_lock(filp, fl, flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid == 0 on input */
		error = -EINVAL;
		if (flock->l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		fl->fl_flags |= FL_OFDLCK;
		fl->fl_owner = filp;	/* OFD locks are owned by the open file */
	}

	error = vfs_test_lock(filp, fl);
	if (error)
		goto out;

	flock->l_type = fl->fl_type;
	if (fl->fl_type != F_UNLCK)
		posix_lock_to_flock64(flock, fl);

out:
	locks_free_lock(fl);
	return error;
}
2509
2510
2511
2512
2513int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2514 struct flock64 *flock)
2515{
2516 struct file_lock *file_lock = locks_alloc_lock();
2517 struct inode *inode = locks_inode(filp);
2518 struct file *f;
2519 int error;
2520
2521 if (file_lock == NULL)
2522 return -ENOLCK;
2523
2524
2525
2526
2527 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2528 error = -EAGAIN;
2529 goto out;
2530 }
2531
2532 error = flock64_to_posix_lock(filp, file_lock, flock);
2533 if (error)
2534 goto out;
2535
2536 error = check_fmode_for_setlk(file_lock);
2537 if (error)
2538 goto out;
2539
2540
2541
2542
2543
2544 switch (cmd) {
2545 case F_OFD_SETLK:
2546 error = -EINVAL;
2547 if (flock->l_pid != 0)
2548 goto out;
2549
2550 cmd = F_SETLK64;
2551 file_lock->fl_flags |= FL_OFDLCK;
2552 file_lock->fl_owner = filp;
2553 break;
2554 case F_OFD_SETLKW:
2555 error = -EINVAL;
2556 if (flock->l_pid != 0)
2557 goto out;
2558
2559 cmd = F_SETLKW64;
2560 file_lock->fl_flags |= FL_OFDLCK;
2561 file_lock->fl_owner = filp;
2562
2563 case F_SETLKW64:
2564 file_lock->fl_flags |= FL_SLEEP;
2565 }
2566
2567 error = do_lock_file_wait(filp, cmd, file_lock);
2568
2569
2570
2571
2572
2573
2574 if (!error && file_lock->fl_type != F_UNLCK &&
2575 !(file_lock->fl_flags & FL_OFDLCK)) {
2576
2577
2578
2579
2580
2581 spin_lock(¤t->files->file_lock);
2582 f = fcheck(fd);
2583 spin_unlock(¤t->files->file_lock);
2584 if (f != filp) {
2585 file_lock->fl_type = F_UNLCK;
2586 error = do_lock_file_wait(filp, cmd, file_lock);
2587 WARN_ON_ONCE(error);
2588 error = -EBADF;
2589 }
2590 }
2591out:
2592 locks_free_lock(file_lock);
2593 return error;
2594}
2595#endif
2596
2597
2598
2599
2600
2601
/*
 * This function is called when the file is being removed or closed:
 * release all POSIX locks held by @owner on @filp by issuing a full-range
 * F_UNLCK.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	/* on-stack unlock request covering the whole file */
	locks_init_lock(&lock);
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
EXPORT_SYMBOL(locks_remove_posix);
2636
2637
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	struct file_lock fl;
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	/*
	 * Build an unlock request in the on-stack fl.  Return value is
	 * ignored here; presumably flock_make_lock() cannot fail when given
	 * caller-provided storage -- TODO confirm against its definition.
	 */
	flock_make_lock(filp, LOCK_UN, &fl);
	fl.fl_flags |= FL_CLOSE;

	if (filp->f_op->flock)
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
2658
2659
/* The i_flctx must be valid when calling into here */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* unlock every lease on this file; _safe because entries are removed */
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);

	/* free the removed leases outside the spinlock */
	locks_dispose_list(&dispose);
}
2679
2680
2681
2682
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks (owner is the struct file itself) */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);

	/* warn about any stray locks still referencing this file */
	spin_lock(&ctx->flc_lock);
	locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
	locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
	locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
	spin_unlock(&ctx->flc_lock);
}
2706
2707
2708
2709
2710
2711
2712
2713
2714int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2715{
2716 if (filp->f_op->lock)
2717 return filp->f_op->lock(filp, F_CANCELLK, fl);
2718 return 0;
2719}
2720EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2721
2722#ifdef CONFIG_PROC_FS
2723#include <linux/proc_fs.h>
2724#include <linux/seq_file.h>
2725
/*
 * Iterator state for the /proc/locks seq_file: the percpu hlist CPU
 * currently being walked, plus a running position used as the lock ID.
 */
struct locks_iterator {
	int	li_cpu;
	loff_t	li_pos;
};
2730
/*
 * Print one lock's status line in /proc/locks format.  The field layout
 * and strings below are userspace ABI -- do not change them.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	/* translate into the pid namespace of the proc mount being read */
	fl_pid = locks_translate_pid(fl, proc_pidns);

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		/* a breaking lease shows its target type, not F_WRLCK */
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
2809
/*
 * seq_file ->show for /proc/locks: print one held lock plus every
 * request currently blocked on it.
 */
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl = hlist_entry(v, struct file_lock, fl_link);

	/* skip locks whose owner translates to pid 0 in this proc pidns */
	if (locks_translate_pid(fl, proc_pidns) == 0)
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	/* blocked requests are printed indented under their blocker */
	list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}
2828
2829static void __show_fd_locks(struct seq_file *f,
2830 struct list_head *head, int *id,
2831 struct file *filp, struct files_struct *files)
2832{
2833 struct file_lock *fl;
2834
2835 list_for_each_entry(fl, head, fl_list) {
2836
2837 if (filp != fl->fl_file)
2838 continue;
2839 if (fl->fl_owner != files &&
2840 fl->fl_owner != filp)
2841 continue;
2842
2843 (*id)++;
2844 seq_puts(f, "lock:\t");
2845 lock_get_status(f, fl, *id, "");
2846 }
2847}
2848
/*
 * Emit all flock, POSIX, and lease locks held on @filp by @files into the
 * /proc/<pid>/fdinfo seq_file.
 */
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
2866
/*
 * seq_file ->start for /proc/locks: exclude lock-list writers via
 * file_rwsem and take blocked_lock_lock so the blocked-request lists are
 * stable while locks_show() walks them.
 */
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	iter->li_pos = *pos + 1;	/* printed lock IDs are 1-based */
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}
2877
/* seq_file ->next: advance to the next lock across the percpu hlists. */
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}
2885
/* seq_file ->stop: release the locks taken in locks_start(). */
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
2892
/* seq_file operations backing /proc/locks */
static const struct seq_operations locks_seq_operations = {
	.start	= locks_start,
	.next	= locks_next,
	.stop	= locks_stop,
	.show	= locks_show,
};
2899
/* Register /proc/locks with per-open locks_iterator private data. */
static int __init proc_locks_init(void)
{
	proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
			sizeof(struct locks_iterator), NULL);
	return 0;
}
fs_initcall(proc_locks_init);
2907#endif
2908
/*
 * Create the slab caches for lock contexts and file_lock objects and
 * initialize every possible CPU's file_lock_list bucket.  SLAB_PANIC
 * makes allocation failure fatal at boot, so no error handling is needed.
 */
static int __init filelock_init(void)
{
	int i;

	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	return 0;
}
core_initcall(filelock_init);
2929