/*
 *  linux/fs/locks.c
 *
 *  POSIX byte-range locks, flock(2) locks and file leases.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

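/*
 * The tunables below are exported through sysctl as
 * /proc/sys/fs/leases-enable and /proc/sys/fs/lease-break-time.
 * The first switches lease support on and off (new leases are refused
 * with -EINVAL when it is 0); the second is the number of seconds a
 * lease holder is given to respond to a lease break before the kernel
 * revokes the lease itself.
 */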
static int leases_enable = 1;
static int lease_break_time = 45;

#ifdef CONFIG_SYSCTL
static struct ctl_table locks_sysctls[] = {
	{
		.procname	= "leases-enable",
		.data		= &leases_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#ifdef CONFIG_MMU
	{
		.procname	= "lease-break-time",
		.data		= &lease_break_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif /* CONFIG_MMU */
	{}
};

static int __init init_fs_locks_sysctls(void)
{
	register_sysctl_init("fs", locks_sysctls);
	return 0;
}
early_initcall(init_fs_locks_sysctls);
#endif /* CONFIG_SYSCTL */
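
/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock
 * is held.
 */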
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
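
/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * Blocked requests are hashed by owner (see posix_owner_key()) so that all
 * pending requests of a given owner can be found quickly.  Protected by
 * blocked_lock_lock.
 */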
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
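
/*
 * blocked_lock_lock protects the blocked_hash and, for file_locks that are
 * acting as lock *requests*, the fl_blocker pointer and the
 * fl_blocked_requests/fl_blocked_member lists.  Where both locks are needed,
 * the per-inode flc_lock is taken first.  A few fast paths read
 * fl_blocked_requests or fl_blocker locklessly and rely on the
 * acquire/release pairing documented in locks_delete_block().
 */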
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

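	/*
	 * Assign the pointer if it's not already assigned.  If it is, then
	 * free our allocation and take the context that won the race;
	 * the cmpxchg() pairs with the smp_load_acquire() above.
	 */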
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
203
204static void
205locks_dump_ctx_list(struct list_head *list, char *list_type)
206{
207 struct file_lock *fl;
208
209 list_for_each_entry(fl, list, fl_list) {
210 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
211 }
212}
213
214static void
215locks_check_ctx_lists(struct inode *inode)
216{
217 struct file_lock_context *ctx = inode->i_flctx;
218
219 if (unlikely(!list_empty(&ctx->flc_flock) ||
220 !list_empty(&ctx->flc_posix) ||
221 !list_empty(&ctx->flc_lease))) {
222 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
223 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
224 inode->i_ino);
225 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
226 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
227 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
228 }
229}
230
231static void
232locks_check_ctx_file_list(struct file *filp, struct list_head *list,
233 char *list_type)
234{
235 struct file_lock *fl;
236 struct inode *inode = locks_inode(filp);
237
238 list_for_each_entry(fl, list, fl_list)
239 if (fl->fl_file == filp)
240 pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
241 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
242 list_type, MAJOR(inode->i_sb->s_dev),
243 MINOR(inode->i_sb->s_dev), inode->i_ino,
244 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
245}
246
247void
248locks_free_lock_context(struct inode *inode)
249{
250 struct file_lock_context *ctx = inode->i_flctx;
251
252 if (unlikely(ctx)) {
253 locks_check_ctx_lists(inode);
254 kmem_cache_free(flctx_cache, ctx);
255 }
256}
257
258static void locks_init_lock_heads(struct file_lock *fl)
259{
260 INIT_HLIST_NODE(&fl->fl_link);
261 INIT_LIST_HEAD(&fl->fl_list);
262 INIT_LIST_HEAD(&fl->fl_blocked_requests);
263 INIT_LIST_HEAD(&fl->fl_blocked_member);
264 init_waitqueue_head(&fl->fl_wait);
265}
266
267
268struct file_lock *locks_alloc_lock(void)
269{
270 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
271
272 if (fl)
273 locks_init_lock_heads(fl);
274
275 return fl;
276}
277EXPORT_SYMBOL_GPL(locks_alloc_lock);
278
279void locks_release_private(struct file_lock *fl)
280{
281 BUG_ON(waitqueue_active(&fl->fl_wait));
282 BUG_ON(!list_empty(&fl->fl_list));
283 BUG_ON(!list_empty(&fl->fl_blocked_requests));
284 BUG_ON(!list_empty(&fl->fl_blocked_member));
285 BUG_ON(!hlist_unhashed(&fl->fl_link));
286
287 if (fl->fl_ops) {
288 if (fl->fl_ops->fl_release_private)
289 fl->fl_ops->fl_release_private(fl);
290 fl->fl_ops = NULL;
291 }
292
293 if (fl->fl_lmops) {
294 if (fl->fl_lmops->lm_put_owner) {
295 fl->fl_lmops->lm_put_owner(fl->fl_owner);
296 fl->fl_owner = NULL;
297 }
298 fl->fl_lmops = NULL;
299 }
300}
301EXPORT_SYMBOL_GPL(locks_release_private);
302
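/**
 * locks_owner_has_blockers - Check for blocking lock requests
 * @flctx: file lock context
 * @owner: lock owner
 *
 * Return values:
 *   %true: @owner has at least one blocked request on its POSIX locks
 *   %false: @owner has no blockers
 */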
bool locks_owner_has_blockers(struct file_lock_context *flctx,
			      fl_owner_t owner)
{
	struct file_lock *fl;

	spin_lock(&flctx->flc_lock);
	list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
		if (fl->fl_owner != owner)
			continue;
		if (!list_empty(&fl->fl_blocked_requests)) {
			spin_unlock(&flctx->flc_lock);
			return true;
		}
	}
	spin_unlock(&flctx->flc_lock);
	return false;
}
EXPORT_SYMBOL_GPL(locks_owner_has_blockers);
330
331
332void locks_free_lock(struct file_lock *fl)
333{
334 locks_release_private(fl);
335 kmem_cache_free(filelock_cache, fl);
336}
337EXPORT_SYMBOL(locks_free_lock);
338
339static void
340locks_dispose_list(struct list_head *dispose)
341{
342 struct file_lock *fl;
343
344 while (!list_empty(dispose)) {
345 fl = list_first_entry(dispose, struct file_lock, fl_list);
346 list_del_init(&fl->fl_list);
347 locks_free_lock(fl);
348 }
349}
350
351void locks_init_lock(struct file_lock *fl)
352{
353 memset(fl, 0, sizeof(struct file_lock));
354 locks_init_lock_heads(fl);
355}
356EXPORT_SYMBOL(locks_init_lock);
357
358
359
360
361void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
362{
363 new->fl_owner = fl->fl_owner;
364 new->fl_pid = fl->fl_pid;
365 new->fl_file = NULL;
366 new->fl_flags = fl->fl_flags;
367 new->fl_type = fl->fl_type;
368 new->fl_start = fl->fl_start;
369 new->fl_end = fl->fl_end;
370 new->fl_lmops = fl->fl_lmops;
371 new->fl_ops = NULL;
372
373 if (fl->fl_lmops) {
374 if (fl->fl_lmops->lm_get_owner)
375 fl->fl_lmops->lm_get_owner(fl->fl_owner);
376 }
377}
378EXPORT_SYMBOL(locks_copy_conflock);
379
380void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
381{
382
383 WARN_ON_ONCE(new->fl_ops);
384
385 locks_copy_conflock(new, fl);
386
387 new->fl_file = fl->fl_file;
388 new->fl_ops = fl->fl_ops;
389
390 if (fl->fl_ops) {
391 if (fl->fl_ops->fl_copy_lock)
392 fl->fl_ops->fl_copy_lock(new, fl);
393 }
394}
395EXPORT_SYMBOL(locks_copy_lock);
396
397static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
398{
399 struct file_lock *f;
400
401
402
403
404
405
406 if (list_empty(&fl->fl_blocked_requests))
407 return;
408 spin_lock(&blocked_lock_lock);
409 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
410 list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
411 f->fl_blocker = new;
412 spin_unlock(&blocked_lock_lock);
413}
414
415static inline int flock_translate_cmd(int cmd) {
416 switch (cmd) {
417 case LOCK_SH:
418 return F_RDLCK;
419 case LOCK_EX:
420 return F_WRLCK;
421 case LOCK_UN:
422 return F_UNLCK;
423 }
424 return -EINVAL;
425}
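
/*
 * Fill in a file_lock structure with an appropriate FLOCK lock for the
 * given flock(2) command.  If @fl is NULL a new lock is allocated;
 * otherwise *@fl is reinitialized and reused.
 */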
428static struct file_lock *
429flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
430{
431 int type = flock_translate_cmd(cmd);
432
433 if (type < 0)
434 return ERR_PTR(type);
435
436 if (fl == NULL) {
437 fl = locks_alloc_lock();
438 if (fl == NULL)
439 return ERR_PTR(-ENOMEM);
440 } else {
441 locks_init_lock(fl);
442 }
443
444 fl->fl_file = filp;
445 fl->fl_owner = filp;
446 fl->fl_pid = current->tgid;
447 fl->fl_flags = FL_FLOCK;
448 fl->fl_type = type;
449 fl->fl_end = OFFSET_MAX;
450
451 return fl;
452}
453
454static int assign_type(struct file_lock *fl, long type)
455{
456 switch (type) {
457 case F_RDLCK:
458 case F_WRLCK:
459 case F_UNLCK:
460 fl->fl_type = type;
461 break;
462 default:
463 return -EINVAL;
464 }
465 return 0;
466}
467
468static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
469 struct flock64 *l)
470{
471 switch (l->l_whence) {
472 case SEEK_SET:
473 fl->fl_start = 0;
474 break;
475 case SEEK_CUR:
476 fl->fl_start = filp->f_pos;
477 break;
478 case SEEK_END:
479 fl->fl_start = i_size_read(file_inode(filp));
480 break;
481 default:
482 return -EINVAL;
483 }
484 if (l->l_start > OFFSET_MAX - fl->fl_start)
485 return -EOVERFLOW;
486 fl->fl_start += l->l_start;
487 if (fl->fl_start < 0)
488 return -EINVAL;
489
490
491
492 if (l->l_len > 0) {
493 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
494 return -EOVERFLOW;
495 fl->fl_end = fl->fl_start + (l->l_len - 1);
496
497 } else if (l->l_len < 0) {
498 if (fl->fl_start + l->l_len < 0)
499 return -EINVAL;
500 fl->fl_end = fl->fl_start - 1;
501 fl->fl_start += l->l_len;
502 } else
503 fl->fl_end = OFFSET_MAX;
504
505 fl->fl_owner = current->files;
506 fl->fl_pid = current->tgid;
507 fl->fl_file = filp;
508 fl->fl_flags = FL_POSIX;
509 fl->fl_ops = NULL;
510 fl->fl_lmops = NULL;
511
512 return assign_type(fl, l->l_type);
513}
514
515
516
517
518static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
519 struct flock *l)
520{
521 struct flock64 ll = {
522 .l_type = l->l_type,
523 .l_whence = l->l_whence,
524 .l_start = l->l_start,
525 .l_len = l->l_len,
526 };
527
528 return flock64_to_posix_lock(filp, fl, &ll);
529}
530
531
532static bool
533lease_break_callback(struct file_lock *fl)
534{
535 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
536 return false;
537}
538
539static void
540lease_setup(struct file_lock *fl, void **priv)
541{
542 struct file *filp = fl->fl_file;
543 struct fasync_struct *fa = *priv;
544
545
546
547
548
549
550 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
551 *priv = NULL;
552
553 __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
554}
555
556static const struct lock_manager_operations lease_manager_ops = {
557 .lm_break = lease_break_callback,
558 .lm_change = lease_modify,
559 .lm_setup = lease_setup,
560};
561
562
563
564
565static int lease_init(struct file *filp, long type, struct file_lock *fl)
566{
567 if (assign_type(fl, type) != 0)
568 return -EINVAL;
569
570 fl->fl_owner = filp;
571 fl->fl_pid = current->tgid;
572
573 fl->fl_file = filp;
574 fl->fl_flags = FL_LEASE;
575 fl->fl_start = 0;
576 fl->fl_end = OFFSET_MAX;
577 fl->fl_ops = NULL;
578 fl->fl_lmops = &lease_manager_ops;
579 return 0;
580}
581
582
583static struct file_lock *lease_alloc(struct file *filp, long type)
584{
585 struct file_lock *fl = locks_alloc_lock();
586 int error = -ENOMEM;
587
588 if (fl == NULL)
589 return ERR_PTR(error);
590
591 error = lease_init(filp, type, fl);
592 if (error) {
593 locks_free_lock(fl);
594 return ERR_PTR(error);
595 }
596 return fl;
597}
598
599
600
601static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
602{
603 return ((fl1->fl_end >= fl2->fl_start) &&
604 (fl2->fl_end >= fl1->fl_start));
605}
606
607
608
609
610static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
611{
612 return fl1->fl_owner == fl2->fl_owner;
613}
614
615
616static void locks_insert_global_locks(struct file_lock *fl)
617{
618 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
619
620 percpu_rwsem_assert_held(&file_rwsem);
621
622 spin_lock(&fll->lock);
623 fl->fl_link_cpu = smp_processor_id();
624 hlist_add_head(&fl->fl_link, &fll->hlist);
625 spin_unlock(&fll->lock);
626}
627
628
629static void locks_delete_global_locks(struct file_lock *fl)
630{
631 struct file_lock_list_struct *fll;
632
633 percpu_rwsem_assert_held(&file_rwsem);
634
635
636
637
638
639
640 if (hlist_unhashed(&fl->fl_link))
641 return;
642
643 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
644 spin_lock(&fll->lock);
645 hlist_del_init(&fl->fl_link);
646 spin_unlock(&fll->lock);
647}
648
649static unsigned long
650posix_owner_key(struct file_lock *fl)
651{
652 return (unsigned long)fl->fl_owner;
653}
654
655static void locks_insert_global_blocked(struct file_lock *waiter)
656{
657 lockdep_assert_held(&blocked_lock_lock);
658
659 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
660}
661
662static void locks_delete_global_blocked(struct file_lock *waiter)
663{
664 lockdep_assert_held(&blocked_lock_lock);
665
666 hash_del(&waiter->fl_link);
667}
668
669
670
671
672
673
674static void __locks_delete_block(struct file_lock *waiter)
675{
676 locks_delete_global_blocked(waiter);
677 list_del_init(&waiter->fl_blocked_member);
678}
679
680static void __locks_wake_up_blocks(struct file_lock *blocker)
681{
682 while (!list_empty(&blocker->fl_blocked_requests)) {
683 struct file_lock *waiter;
684
685 waiter = list_first_entry(&blocker->fl_blocked_requests,
686 struct file_lock, fl_blocked_member);
687 __locks_delete_block(waiter);
688 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
689 waiter->fl_lmops->lm_notify(waiter);
690 else
691 wake_up(&waiter->fl_wait);
692
693
694
695
696
697
698 smp_store_release(&waiter->fl_blocker, NULL);
699 }
700}
701
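/**
 * locks_delete_block - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * Detach @waiter from its blocker, wake up anything that was queued
 * behind it, and clear its fl_blocker pointer.  Returns 0 if @waiter was
 * still blocked when called, or -ENOENT if it had already been granted
 * or removed.
 */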
int locks_delete_block(struct file_lock *waiter)
{
	int status = -ENOENT;
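
	/*
	 * If fl_blocker is NULL, it won't be set again as this thread "owns"
	 * the lock and is the only one that might try to claim the lock.
	 * With the blocked-requests list also empty there is nothing left to
	 * clean up, so the expensive locking below can be skipped.  The
	 * smp_load_acquire() pairs with the smp_store_release() used when
	 * fl_blocker is cleared, ensuring the waker's list manipulation is
	 * visible before fl_blocker is seen as NULL.
	 */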
	if (!smp_load_acquire(&waiter->fl_blocker) &&
	    list_empty(&waiter->fl_blocked_requests))
		return status;

	spin_lock(&blocked_lock_lock);
	if (waiter->fl_blocker)
		status = 0;
	__locks_wake_up_blocks(waiter);
	__locks_delete_block(waiter);

	/*
	 * The setting of fl_blocker to NULL marks the "done" point in
	 * deleting a block.  Paired with the acquire at the top of this
	 * function.
	 */
	smp_store_release(&waiter->fl_blocker, NULL);
	spin_unlock(&blocked_lock_lock);
	return status;
}
EXPORT_SYMBOL(locks_delete_block);
752
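/*
 * Insert @waiter into @blocker's list of pending requests.  If an existing
 * waiter already conflicts with the new request, queue beneath that waiter
 * instead, so that a request is only woken once everything it actually
 * conflicts with has been released.  Must be called with blocked_lock_lock
 * held (and, for insertions that the lockless checks must observe, the
 * relevant flc_lock as well).
 */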
768static void __locks_insert_block(struct file_lock *blocker,
769 struct file_lock *waiter,
770 bool conflict(struct file_lock *,
771 struct file_lock *))
772{
773 struct file_lock *fl;
774 BUG_ON(!list_empty(&waiter->fl_blocked_member));
775
776new_blocker:
777 list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
778 if (conflict(fl, waiter)) {
779 blocker = fl;
780 goto new_blocker;
781 }
782 waiter->fl_blocker = blocker;
783 list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
784 if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
785 locks_insert_global_blocked(waiter);
786
787
788
789
790
791 __locks_wake_up_blocks(waiter);
792}
793
794
795static void locks_insert_block(struct file_lock *blocker,
796 struct file_lock *waiter,
797 bool conflict(struct file_lock *,
798 struct file_lock *))
799{
800 spin_lock(&blocked_lock_lock);
801 __locks_insert_block(blocker, waiter, conflict);
802 spin_unlock(&blocked_lock_lock);
803}
804
805
806
807
808
809
810static void locks_wake_up_blocks(struct file_lock *blocker)
811{
812
813
814
815
816
817
818
819 if (list_empty(&blocker->fl_blocked_requests))
820 return;
821
822 spin_lock(&blocked_lock_lock);
823 __locks_wake_up_blocks(blocker);
824 spin_unlock(&blocked_lock_lock);
825}
826
827static void
828locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
829{
830 list_add_tail(&fl->fl_list, before);
831 locks_insert_global_locks(fl);
832}
833
834static void
835locks_unlink_lock_ctx(struct file_lock *fl)
836{
837 locks_delete_global_locks(fl);
838 list_del_init(&fl->fl_list);
839 locks_wake_up_blocks(fl);
840}
841
842static void
843locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
844{
845 locks_unlink_lock_ctx(fl);
846 if (dispose)
847 list_add(&fl->fl_list, dispose);
848 else
849 locks_free_lock(fl);
850}
851
852
853
854
855static bool locks_conflict(struct file_lock *caller_fl,
856 struct file_lock *sys_fl)
857{
858 if (sys_fl->fl_type == F_WRLCK)
859 return true;
860 if (caller_fl->fl_type == F_WRLCK)
861 return true;
862 return false;
863}
864
865
866
867
868static bool posix_locks_conflict(struct file_lock *caller_fl,
869 struct file_lock *sys_fl)
870{
871
872
873
874 if (posix_same_owner(caller_fl, sys_fl))
875 return false;
876
877
878 if (!locks_overlap(caller_fl, sys_fl))
879 return false;
880
881 return locks_conflict(caller_fl, sys_fl);
882}
883
884
885
886
887static bool flock_locks_conflict(struct file_lock *caller_fl,
888 struct file_lock *sys_fl)
889{
890
891
892
893 if (caller_fl->fl_file == sys_fl->fl_file)
894 return false;
895
896 return locks_conflict(caller_fl, sys_fl);
897}
898
899void
900posix_test_lock(struct file *filp, struct file_lock *fl)
901{
902 struct file_lock *cfl;
903 struct file_lock_context *ctx;
904 struct inode *inode = locks_inode(filp);
905 void *owner;
906 void (*func)(void);
907
908 ctx = smp_load_acquire(&inode->i_flctx);
909 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
910 fl->fl_type = F_UNLCK;
911 return;
912 }
913
914retry:
915 spin_lock(&ctx->flc_lock);
916 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
917 if (!posix_locks_conflict(fl, cfl))
918 continue;
919 if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
920 && (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
921 owner = cfl->fl_lmops->lm_mod_owner;
922 func = cfl->fl_lmops->lm_expire_lock;
923 __module_get(owner);
924 spin_unlock(&ctx->flc_lock);
925 (*func)();
926 module_put(owner);
927 goto retry;
928 }
929 locks_copy_conflock(fl, cfl);
930 goto out;
931 }
932 fl->fl_type = F_UNLCK;
933out:
934 spin_unlock(&ctx->flc_lock);
935 return;
936}
937EXPORT_SYMBOL(posix_test_lock);
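
/*
 * Deadlock detection:
 *
 * We attempt to detect deadlocks that are due purely to POSIX file locks.
 * A blocked request's owner is assumed to be waiting for at most one lock
 * at a time, so the chain "who owns the lock I'm blocked on, and what is
 * that owner itself waiting for?" can be followed via the blocked_hash.
 * If the chain leads back to the requesting owner, granting the request
 * would deadlock and posix_locks_deadlock() reports it.  The walk is
 * bounded by MAX_DEADLK_ITERATIONS to avoid chasing overly long chains,
 * and OFD locks are exempted because their owner is the open file
 * description, which several independent threads may legitimately share.
 */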
972#define MAX_DEADLK_ITERATIONS 10
973
974
975static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
976{
977 struct file_lock *fl;
978
979 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
980 if (posix_same_owner(fl, block_fl)) {
981 while (fl->fl_blocker)
982 fl = fl->fl_blocker;
983 return fl;
984 }
985 }
986 return NULL;
987}
988
989
990static int posix_locks_deadlock(struct file_lock *caller_fl,
991 struct file_lock *block_fl)
992{
993 int i = 0;
994
995 lockdep_assert_held(&blocked_lock_lock);
996
997
998
999
1000
1001 if (IS_OFDLCK(caller_fl))
1002 return 0;
1003
1004 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
1005 if (i++ > MAX_DEADLK_ITERATIONS)
1006 return 0;
1007 if (posix_same_owner(caller_fl, block_fl))
1008 return 1;
1009 }
1010 return 0;
1011}
1012
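/*
 * Try to create an FL_FLOCK-style lock on the inode.  A given struct file
 * holds at most one flock lock: a request of the same type as an existing
 * lock is a no-op, a different type replaces it, and F_UNLCK removes it.
 * If the request conflicts with another file's flock lock and FL_SLEEP is
 * set, the request is queued and FILE_LOCK_DEFERRED is returned.
 */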
1020static int flock_lock_inode(struct inode *inode, struct file_lock *request)
1021{
1022 struct file_lock *new_fl = NULL;
1023 struct file_lock *fl;
1024 struct file_lock_context *ctx;
1025 int error = 0;
1026 bool found = false;
1027 LIST_HEAD(dispose);
1028
1029 ctx = locks_get_lock_context(inode, request->fl_type);
1030 if (!ctx) {
1031 if (request->fl_type != F_UNLCK)
1032 return -ENOMEM;
1033 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
1034 }
1035
1036 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
1037 new_fl = locks_alloc_lock();
1038 if (!new_fl)
1039 return -ENOMEM;
1040 }
1041
1042 percpu_down_read(&file_rwsem);
1043 spin_lock(&ctx->flc_lock);
1044 if (request->fl_flags & FL_ACCESS)
1045 goto find_conflict;
1046
1047 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1048 if (request->fl_file != fl->fl_file)
1049 continue;
1050 if (request->fl_type == fl->fl_type)
1051 goto out;
1052 found = true;
1053 locks_delete_lock_ctx(fl, &dispose);
1054 break;
1055 }
1056
1057 if (request->fl_type == F_UNLCK) {
1058 if ((request->fl_flags & FL_EXISTS) && !found)
1059 error = -ENOENT;
1060 goto out;
1061 }
1062
1063find_conflict:
1064 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1065 if (!flock_locks_conflict(request, fl))
1066 continue;
1067 error = -EAGAIN;
1068 if (!(request->fl_flags & FL_SLEEP))
1069 goto out;
1070 error = FILE_LOCK_DEFERRED;
1071 locks_insert_block(fl, request, flock_locks_conflict);
1072 goto out;
1073 }
1074 if (request->fl_flags & FL_ACCESS)
1075 goto out;
1076 locks_copy_lock(new_fl, request);
1077 locks_move_blocks(new_fl, request);
1078 locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
1079 new_fl = NULL;
1080 error = 0;
1081
1082out:
1083 spin_unlock(&ctx->flc_lock);
1084 percpu_up_read(&file_rwsem);
1085 if (new_fl)
1086 locks_free_lock(new_fl);
1087 locks_dispose_list(&dispose);
1088 trace_flock_lock_inode(inode, request, error);
1089 return error;
1090}
1091
1092static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1093 struct file_lock *conflock)
1094{
1095 struct file_lock *fl, *tmp;
1096 struct file_lock *new_fl = NULL;
1097 struct file_lock *new_fl2 = NULL;
1098 struct file_lock *left = NULL;
1099 struct file_lock *right = NULL;
1100 struct file_lock_context *ctx;
1101 int error;
1102 bool added = false;
1103 LIST_HEAD(dispose);
1104 void *owner;
1105 void (*func)(void);
1106
1107 ctx = locks_get_lock_context(inode, request->fl_type);
1108 if (!ctx)
1109 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1110
1111
1112
1113
1114
1115
1116
1117 if (!(request->fl_flags & FL_ACCESS) &&
1118 (request->fl_type != F_UNLCK ||
1119 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1120 new_fl = locks_alloc_lock();
1121 new_fl2 = locks_alloc_lock();
1122 }
1123
1124retry:
1125 percpu_down_read(&file_rwsem);
1126 spin_lock(&ctx->flc_lock);
1127
1128
1129
1130
1131
1132 if (request->fl_type != F_UNLCK) {
1133 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1134 if (!posix_locks_conflict(request, fl))
1135 continue;
1136 if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1137 && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1138 owner = fl->fl_lmops->lm_mod_owner;
1139 func = fl->fl_lmops->lm_expire_lock;
1140 __module_get(owner);
1141 spin_unlock(&ctx->flc_lock);
1142 percpu_up_read(&file_rwsem);
1143 (*func)();
1144 module_put(owner);
1145 goto retry;
1146 }
1147 if (conflock)
1148 locks_copy_conflock(conflock, fl);
1149 error = -EAGAIN;
1150 if (!(request->fl_flags & FL_SLEEP))
1151 goto out;
1152
1153
1154
1155
1156 error = -EDEADLK;
1157 spin_lock(&blocked_lock_lock);
1158
1159
1160
1161
1162 __locks_wake_up_blocks(request);
1163 if (likely(!posix_locks_deadlock(request, fl))) {
1164 error = FILE_LOCK_DEFERRED;
1165 __locks_insert_block(fl, request,
1166 posix_locks_conflict);
1167 }
1168 spin_unlock(&blocked_lock_lock);
1169 goto out;
1170 }
1171 }
1172
1173
1174 error = 0;
1175 if (request->fl_flags & FL_ACCESS)
1176 goto out;
1177
1178
1179 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1180 if (posix_same_owner(request, fl))
1181 break;
1182 }
1183
1184
1185 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1186 if (!posix_same_owner(request, fl))
1187 break;
1188
1189
1190 if (request->fl_type == fl->fl_type) {
1191
1192
1193
1194
1195 if (fl->fl_end < request->fl_start - 1)
1196 continue;
1197
1198
1199
1200 if (fl->fl_start - 1 > request->fl_end)
1201 break;
1202
1203
1204
1205
1206
1207
1208 if (fl->fl_start > request->fl_start)
1209 fl->fl_start = request->fl_start;
1210 else
1211 request->fl_start = fl->fl_start;
1212 if (fl->fl_end < request->fl_end)
1213 fl->fl_end = request->fl_end;
1214 else
1215 request->fl_end = fl->fl_end;
1216 if (added) {
1217 locks_delete_lock_ctx(fl, &dispose);
1218 continue;
1219 }
1220 request = fl;
1221 added = true;
1222 } else {
1223
1224
1225
1226 if (fl->fl_end < request->fl_start)
1227 continue;
1228 if (fl->fl_start > request->fl_end)
1229 break;
1230 if (request->fl_type == F_UNLCK)
1231 added = true;
1232 if (fl->fl_start < request->fl_start)
1233 left = fl;
1234
1235
1236
1237 if (fl->fl_end > request->fl_end) {
1238 right = fl;
1239 break;
1240 }
1241 if (fl->fl_start >= request->fl_start) {
1242
1243
1244
1245 if (added) {
1246 locks_delete_lock_ctx(fl, &dispose);
1247 continue;
1248 }
1249
1250
1251
1252
1253
1254
1255
1256 error = -ENOLCK;
1257 if (!new_fl)
1258 goto out;
1259 locks_copy_lock(new_fl, request);
1260 locks_move_blocks(new_fl, request);
1261 request = new_fl;
1262 new_fl = NULL;
1263 locks_insert_lock_ctx(request, &fl->fl_list);
1264 locks_delete_lock_ctx(fl, &dispose);
1265 added = true;
1266 }
1267 }
1268 }
1269
1270
1271
1272
1273
1274
1275 error = -ENOLCK;
1276 if (right && left == right && !new_fl2)
1277 goto out;
1278
1279 error = 0;
1280 if (!added) {
1281 if (request->fl_type == F_UNLCK) {
1282 if (request->fl_flags & FL_EXISTS)
1283 error = -ENOENT;
1284 goto out;
1285 }
1286
1287 if (!new_fl) {
1288 error = -ENOLCK;
1289 goto out;
1290 }
1291 locks_copy_lock(new_fl, request);
1292 locks_move_blocks(new_fl, request);
1293 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1294 fl = new_fl;
1295 new_fl = NULL;
1296 }
1297 if (right) {
1298 if (left == right) {
1299
1300
1301
1302 left = new_fl2;
1303 new_fl2 = NULL;
1304 locks_copy_lock(left, right);
1305 locks_insert_lock_ctx(left, &fl->fl_list);
1306 }
1307 right->fl_start = request->fl_end + 1;
1308 locks_wake_up_blocks(right);
1309 }
1310 if (left) {
1311 left->fl_end = request->fl_start - 1;
1312 locks_wake_up_blocks(left);
1313 }
1314 out:
1315 spin_unlock(&ctx->flc_lock);
1316 percpu_up_read(&file_rwsem);
1317
1318
1319
1320 if (new_fl)
1321 locks_free_lock(new_fl);
1322 if (new_fl2)
1323 locks_free_lock(new_fl2);
1324 locks_dispose_list(&dispose);
1325 trace_posix_lock_inode(inode, request, error);
1326
1327 return error;
1328}
1329
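/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 */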
1344int posix_lock_file(struct file *filp, struct file_lock *fl,
1345 struct file_lock *conflock)
1346{
1347 return posix_lock_inode(locks_inode(filp), fl, conflock);
1348}
1349EXPORT_SYMBOL(posix_lock_file);
1350
1351
1352
1353
1354
1355
1356
1357
1358static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1359{
1360 int error;
1361 might_sleep ();
1362 for (;;) {
1363 error = posix_lock_inode(inode, fl, NULL);
1364 if (error != FILE_LOCK_DEFERRED)
1365 break;
1366 error = wait_event_interruptible(fl->fl_wait,
1367 list_empty(&fl->fl_blocked_member));
1368 if (error)
1369 break;
1370 }
1371 locks_delete_block(fl);
1372 return error;
1373}
1374
1375static void lease_clear_pending(struct file_lock *fl, int arg)
1376{
1377 switch (arg) {
1378 case F_UNLCK:
1379 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1380 fallthrough;
1381 case F_RDLCK:
1382 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1383 }
1384}
1385
1386
1387int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1388{
1389 int error = assign_type(fl, arg);
1390
1391 if (error)
1392 return error;
1393 lease_clear_pending(fl, arg);
1394 locks_wake_up_blocks(fl);
1395 if (arg == F_UNLCK) {
1396 struct file *filp = fl->fl_file;
1397
1398 f_delown(filp);
1399 filp->f_owner.signum = 0;
1400 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1401 if (fl->fl_fasync != NULL) {
1402 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1403 fl->fl_fasync = NULL;
1404 }
1405 locks_delete_lock_ctx(fl, dispose);
1406 }
1407 return 0;
1408}
1409EXPORT_SYMBOL(lease_modify);
1410
1411static bool past_time(unsigned long then)
1412{
1413 if (!then)
1414
1415 return false;
1416 return time_after(jiffies, then);
1417}
1418
1419static void time_out_leases(struct inode *inode, struct list_head *dispose)
1420{
1421 struct file_lock_context *ctx = inode->i_flctx;
1422 struct file_lock *fl, *tmp;
1423
1424 lockdep_assert_held(&ctx->flc_lock);
1425
1426 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1427 trace_time_out_leases(inode, fl);
1428 if (past_time(fl->fl_downgrade_time))
1429 lease_modify(fl, F_RDLCK, dispose);
1430 if (past_time(fl->fl_break_time))
1431 lease_modify(fl, F_UNLCK, dispose);
1432 }
1433}
1434
1435static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1436{
1437 bool rc;
1438
1439 if (lease->fl_lmops->lm_breaker_owns_lease
1440 && lease->fl_lmops->lm_breaker_owns_lease(lease))
1441 return false;
1442 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
1443 rc = false;
1444 goto trace;
1445 }
1446 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) {
1447 rc = false;
1448 goto trace;
1449 }
1450
1451 rc = locks_conflict(breaker, lease);
1452trace:
1453 trace_leases_conflict(rc, lease, breaker);
1454 return rc;
1455}
1456
1457static bool
1458any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1459{
1460 struct file_lock_context *ctx = inode->i_flctx;
1461 struct file_lock *fl;
1462
1463 lockdep_assert_held(&ctx->flc_lock);
1464
1465 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1466 if (leases_conflict(fl, breaker))
1467 return true;
1468 }
1469 return false;
1470}
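
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * Leases are broken on a call to open() or truncate().  This function can
 * sleep unless %O_NONBLOCK was specified at open time, in which case
 * -EWOULDBLOCK is returned while the break is still pending.
 */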
1485int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1486{
1487 int error = 0;
1488 struct file_lock_context *ctx;
1489 struct file_lock *new_fl, *fl, *tmp;
1490 unsigned long break_time;
1491 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1492 LIST_HEAD(dispose);
1493
1494 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1495 if (IS_ERR(new_fl))
1496 return PTR_ERR(new_fl);
1497 new_fl->fl_flags = type;
1498
1499
1500 ctx = smp_load_acquire(&inode->i_flctx);
1501 if (!ctx) {
1502 WARN_ON_ONCE(1);
1503 goto free_lock;
1504 }
1505
1506 percpu_down_read(&file_rwsem);
1507 spin_lock(&ctx->flc_lock);
1508
1509 time_out_leases(inode, &dispose);
1510
1511 if (!any_leases_conflict(inode, new_fl))
1512 goto out;
1513
1514 break_time = 0;
1515 if (lease_break_time > 0) {
1516 break_time = jiffies + lease_break_time * HZ;
1517 if (break_time == 0)
1518 break_time++;
1519 }
1520
1521 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1522 if (!leases_conflict(fl, new_fl))
1523 continue;
1524 if (want_write) {
1525 if (fl->fl_flags & FL_UNLOCK_PENDING)
1526 continue;
1527 fl->fl_flags |= FL_UNLOCK_PENDING;
1528 fl->fl_break_time = break_time;
1529 } else {
1530 if (lease_breaking(fl))
1531 continue;
1532 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1533 fl->fl_downgrade_time = break_time;
1534 }
1535 if (fl->fl_lmops->lm_break(fl))
1536 locks_delete_lock_ctx(fl, &dispose);
1537 }
1538
1539 if (list_empty(&ctx->flc_lease))
1540 goto out;
1541
1542 if (mode & O_NONBLOCK) {
1543 trace_break_lease_noblock(inode, new_fl);
1544 error = -EWOULDBLOCK;
1545 goto out;
1546 }
1547
1548restart:
1549 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1550 break_time = fl->fl_break_time;
1551 if (break_time != 0)
1552 break_time -= jiffies;
1553 if (break_time == 0)
1554 break_time++;
1555 locks_insert_block(fl, new_fl, leases_conflict);
1556 trace_break_lease_block(inode, new_fl);
1557 spin_unlock(&ctx->flc_lock);
1558 percpu_up_read(&file_rwsem);
1559
1560 locks_dispose_list(&dispose);
1561 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1562 list_empty(&new_fl->fl_blocked_member),
1563 break_time);
1564
1565 percpu_down_read(&file_rwsem);
1566 spin_lock(&ctx->flc_lock);
1567 trace_break_lease_unblock(inode, new_fl);
1568 locks_delete_block(new_fl);
1569 if (error >= 0) {
1570
1571
1572
1573
1574 if (error == 0)
1575 time_out_leases(inode, &dispose);
1576 if (any_leases_conflict(inode, new_fl))
1577 goto restart;
1578 error = 0;
1579 }
1580out:
1581 spin_unlock(&ctx->flc_lock);
1582 percpu_up_read(&file_rwsem);
1583 locks_dispose_list(&dispose);
1584free_lock:
1585 locks_free_lock(new_fl);
1586 return error;
1587}
1588EXPORT_SYMBOL(__break_lease);
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1600{
1601 bool has_lease = false;
1602 struct file_lock_context *ctx;
1603 struct file_lock *fl;
1604
1605 ctx = smp_load_acquire(&inode->i_flctx);
1606 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1607 spin_lock(&ctx->flc_lock);
1608 fl = list_first_entry_or_null(&ctx->flc_lease,
1609 struct file_lock, fl_list);
1610 if (fl && (fl->fl_type == F_WRLCK))
1611 has_lease = true;
1612 spin_unlock(&ctx->flc_lock);
1613 }
1614
1615 if (has_lease)
1616 *time = current_time(inode);
1617}
1618EXPORT_SYMBOL(lease_get_mtime);
1619
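/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * If no lease break is pending, the return value is one of:
 *
 * %F_RDLCK to indicate a shared lease is held.
 * %F_WRLCK to indicate an exclusive lease is held.
 * %F_UNLCK to indicate no lease is held.
 *
 * If a lease break is pending, the target type of the break is returned
 * instead: %F_RDLCK if the lease is being downgraded to shared, or
 * %F_UNLCK if it is being removed.
 */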
1643int fcntl_getlease(struct file *filp)
1644{
1645 struct file_lock *fl;
1646 struct inode *inode = locks_inode(filp);
1647 struct file_lock_context *ctx;
1648 int type = F_UNLCK;
1649 LIST_HEAD(dispose);
1650
1651 ctx = smp_load_acquire(&inode->i_flctx);
1652 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1653 percpu_down_read(&file_rwsem);
1654 spin_lock(&ctx->flc_lock);
1655 time_out_leases(inode, &dispose);
1656 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1657 if (fl->fl_file != filp)
1658 continue;
1659 type = target_leasetype(fl);
1660 break;
1661 }
1662 spin_unlock(&ctx->flc_lock);
1663 percpu_up_read(&file_rwsem);
1664
1665 locks_dispose_list(&dispose);
1666 }
1667 return type;
1668}
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681static int
1682check_conflicting_open(struct file *filp, const long arg, int flags)
1683{
1684 struct inode *inode = locks_inode(filp);
1685 int self_wcount = 0, self_rcount = 0;
1686
1687 if (flags & FL_LAYOUT)
1688 return 0;
1689 if (flags & FL_DELEG)
1690
1691 return 0;
1692
1693 if (arg == F_RDLCK)
1694 return inode_is_open_for_write(inode) ? -EAGAIN : 0;
1695 else if (arg != F_WRLCK)
1696 return 0;
1697
1698
1699
1700
1701
1702
1703
1704 if (filp->f_mode & FMODE_WRITE)
1705 self_wcount = 1;
1706 else if (filp->f_mode & FMODE_READ)
1707 self_rcount = 1;
1708
1709 if (atomic_read(&inode->i_writecount) != self_wcount ||
1710 atomic_read(&inode->i_readcount) != self_rcount)
1711 return -EAGAIN;
1712
1713 return 0;
1714}
1715
1716static int
1717generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1718{
1719 struct file_lock *fl, *my_fl = NULL, *lease;
1720 struct inode *inode = locks_inode(filp);
1721 struct file_lock_context *ctx;
1722 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1723 int error;
1724 LIST_HEAD(dispose);
1725
1726 lease = *flp;
1727 trace_generic_add_lease(inode, lease);
1728
1729
1730 ctx = locks_get_lock_context(inode, arg);
1731 if (!ctx)
1732 return -ENOMEM;
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742 if (is_deleg && !inode_trylock(inode))
1743 return -EAGAIN;
1744
1745 if (is_deleg && arg == F_WRLCK) {
1746
1747 inode_unlock(inode);
1748 WARN_ON_ONCE(1);
1749 return -EINVAL;
1750 }
1751
1752 percpu_down_read(&file_rwsem);
1753 spin_lock(&ctx->flc_lock);
1754 time_out_leases(inode, &dispose);
1755 error = check_conflicting_open(filp, arg, lease->fl_flags);
1756 if (error)
1757 goto out;
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767 error = -EAGAIN;
1768 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1769 if (fl->fl_file == filp &&
1770 fl->fl_owner == lease->fl_owner) {
1771 my_fl = fl;
1772 continue;
1773 }
1774
1775
1776
1777
1778
1779 if (arg == F_WRLCK)
1780 goto out;
1781
1782
1783
1784
1785 if (fl->fl_flags & FL_UNLOCK_PENDING)
1786 goto out;
1787 }
1788
1789 if (my_fl != NULL) {
1790 lease = my_fl;
1791 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1792 if (error)
1793 goto out;
1794 goto out_setup;
1795 }
1796
1797 error = -EINVAL;
1798 if (!leases_enable)
1799 goto out;
1800
1801 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811 smp_mb();
1812 error = check_conflicting_open(filp, arg, lease->fl_flags);
1813 if (error) {
1814 locks_unlink_lock_ctx(lease);
1815 goto out;
1816 }
1817
1818out_setup:
1819 if (lease->fl_lmops->lm_setup)
1820 lease->fl_lmops->lm_setup(lease, priv);
1821out:
1822 spin_unlock(&ctx->flc_lock);
1823 percpu_up_read(&file_rwsem);
1824 locks_dispose_list(&dispose);
1825 if (is_deleg)
1826 inode_unlock(inode);
1827 if (!error && !my_fl)
1828 *flp = NULL;
1829 return error;
1830}
1831
1832static int generic_delete_lease(struct file *filp, void *owner)
1833{
1834 int error = -EAGAIN;
1835 struct file_lock *fl, *victim = NULL;
1836 struct inode *inode = locks_inode(filp);
1837 struct file_lock_context *ctx;
1838 LIST_HEAD(dispose);
1839
1840 ctx = smp_load_acquire(&inode->i_flctx);
1841 if (!ctx) {
1842 trace_generic_delete_lease(inode, NULL);
1843 return error;
1844 }
1845
1846 percpu_down_read(&file_rwsem);
1847 spin_lock(&ctx->flc_lock);
1848 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1849 if (fl->fl_file == filp &&
1850 fl->fl_owner == owner) {
1851 victim = fl;
1852 break;
1853 }
1854 }
1855 trace_generic_delete_lease(inode, victim);
1856 if (victim)
1857 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1858 spin_unlock(&ctx->flc_lock);
1859 percpu_up_read(&file_rwsem);
1860 locks_dispose_list(&dispose);
1861 return error;
1862}
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1876 void **priv)
1877{
1878 struct inode *inode = locks_inode(filp);
1879 int error;
1880
1881 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1882 return -EACCES;
1883 if (!S_ISREG(inode->i_mode))
1884 return -EINVAL;
1885 error = security_file_lock(filp, arg);
1886 if (error)
1887 return error;
1888
1889 switch (arg) {
1890 case F_UNLCK:
1891 return generic_delete_lease(filp, *priv);
1892 case F_RDLCK:
1893 case F_WRLCK:
1894 if (!(*flp)->fl_lmops->lm_break) {
1895 WARN_ON_ONCE(1);
1896 return -ENOLCK;
1897 }
1898
1899 return generic_add_lease(filp, arg, flp, priv);
1900 default:
1901 return -EINVAL;
1902 }
1903}
1904EXPORT_SYMBOL(generic_setlease);
1905
1906#if IS_ENABLED(CONFIG_SRCU)
1907
1908
1909
1910
1911
1912
1913static struct srcu_notifier_head lease_notifier_chain;
1914
1915static inline void
1916lease_notifier_chain_init(void)
1917{
1918 srcu_init_notifier_head(&lease_notifier_chain);
1919}
1920
1921static inline void
1922setlease_notifier(long arg, struct file_lock *lease)
1923{
1924 if (arg != F_UNLCK)
1925 srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
1926}
1927
1928int lease_register_notifier(struct notifier_block *nb)
1929{
1930 return srcu_notifier_chain_register(&lease_notifier_chain, nb);
1931}
1932EXPORT_SYMBOL_GPL(lease_register_notifier);
1933
1934void lease_unregister_notifier(struct notifier_block *nb)
1935{
1936 srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
1937}
1938EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1939
1940#else
1941static inline void
1942lease_notifier_chain_init(void)
1943{
1944}
1945
1946static inline void
1947setlease_notifier(long arg, struct file_lock *lease)
1948{
1949}
1950
1951int lease_register_notifier(struct notifier_block *nb)
1952{
1953 return 0;
1954}
1955EXPORT_SYMBOL_GPL(lease_register_notifier);
1956
1957void lease_unregister_notifier(struct notifier_block *nb)
1958{
1959}
1960EXPORT_SYMBOL_GPL(lease_unregister_notifier);
1961
1962#endif
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981int
1982vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1983{
1984 if (lease)
1985 setlease_notifier(arg, *lease);
1986 if (filp->f_op->setlease)
1987 return filp->f_op->setlease(filp, arg, lease, priv);
1988 else
1989 return generic_setlease(filp, arg, lease, priv);
1990}
1991EXPORT_SYMBOL_GPL(vfs_setlease);
1992
1993static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1994{
1995 struct file_lock *fl;
1996 struct fasync_struct *new;
1997 int error;
1998
1999 fl = lease_alloc(filp, arg);
2000 if (IS_ERR(fl))
2001 return PTR_ERR(fl);
2002
2003 new = fasync_alloc();
2004 if (!new) {
2005 locks_free_lock(fl);
2006 return -ENOMEM;
2007 }
2008 new->fa_fd = fd;
2009
2010 error = vfs_setlease(filp, arg, &fl, (void **)&new);
2011 if (fl)
2012 locks_free_lock(fl);
2013 if (new)
2014 fasync_free(new);
2015 return error;
2016}
2017
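/**
 * fcntl_setlease	-	sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.  %F_UNLCK removes the
 * caller's lease; any other type is installed via do_fcntl_add_lease(),
 * which also arranges for a SIGIO to be delivered on @fd when the lease
 * is broken.
 */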
2028int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
2029{
2030 if (arg == F_UNLCK)
2031 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
2032 return do_fcntl_add_lease(fd, filp, arg);
2033}
2034
2035
2036
2037
2038
2039
2040
2041
2042static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2043{
2044 int error;
2045 might_sleep();
2046 for (;;) {
2047 error = flock_lock_inode(inode, fl);
2048 if (error != FILE_LOCK_DEFERRED)
2049 break;
2050 error = wait_event_interruptible(fl->fl_wait,
2051 list_empty(&fl->fl_blocked_member));
2052 if (error)
2053 break;
2054 }
2055 locks_delete_block(fl);
2056 return error;
2057}
2058
2059
2060
2061
2062
2063
2064
2065
2066int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2067{
2068 int res = 0;
2069 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2070 case FL_POSIX:
2071 res = posix_lock_inode_wait(inode, fl);
2072 break;
2073 case FL_FLOCK:
2074 res = flock_lock_inode_wait(inode, fl);
2075 break;
2076 default:
2077 BUG();
2078 }
2079 return res;
2080}
2081EXPORT_SYMBOL(locks_lock_inode_wait);
2082
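/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of:
 *
 * - %LOCK_SH -- a shared lock.
 * - %LOCK_EX -- an exclusive lock.
 * - %LOCK_UN -- remove an existing lock.
 * - %LOCK_MAND -- a "mandatory" flock (no longer supported; the request
 *   is logged once and ignored).
 *
 * %LOCK_NB may be ORed into @cmd to make the call non-blocking.
 */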
2098SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
2099{
2100 struct fd f = fdget(fd);
2101 struct file_lock *lock;
2102 int can_sleep, unlock;
2103 int error;
2104
2105 error = -EBADF;
2106 if (!f.file)
2107 goto out;
2108
2109 can_sleep = !(cmd & LOCK_NB);
2110 cmd &= ~LOCK_NB;
2111 unlock = (cmd == LOCK_UN);
2112
2113 if (!unlock && !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2114 goto out_putf;
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124 if (cmd & LOCK_MAND) {
2125 pr_warn_once("Attempt to set a LOCK_MAND lock via flock(2). This support has been removed and the request ignored.\n");
2126 error = 0;
2127 goto out_putf;
2128 }
2129
2130 lock = flock_make_lock(f.file, cmd, NULL);
2131 if (IS_ERR(lock)) {
2132 error = PTR_ERR(lock);
2133 goto out_putf;
2134 }
2135
2136 if (can_sleep)
2137 lock->fl_flags |= FL_SLEEP;
2138
2139 error = security_file_lock(f.file, lock->fl_type);
2140 if (error)
2141 goto out_free;
2142
2143 if (f.file->f_op->flock)
2144 error = f.file->f_op->flock(f.file,
2145 (can_sleep) ? F_SETLKW : F_SETLK,
2146 lock);
2147 else
2148 error = locks_lock_file_wait(f.file, lock);
2149
2150 out_free:
2151 locks_free_lock(lock);
2152
2153 out_putf:
2154 fdput(f);
2155 out:
2156 return error;
2157}
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167int vfs_test_lock(struct file *filp, struct file_lock *fl)
2168{
2169 if (filp->f_op->lock)
2170 return filp->f_op->lock(filp, F_GETLK, fl);
2171 posix_test_lock(filp, fl);
2172 return 0;
2173}
2174EXPORT_SYMBOL_GPL(vfs_test_lock);
2175
2176
2177
2178
2179
2180
2181
2182
2183static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2184{
2185 pid_t vnr;
2186 struct pid *pid;
2187
2188 if (IS_OFDLCK(fl))
2189 return -1;
2190 if (IS_REMOTELCK(fl))
2191 return fl->fl_pid;
2192
2193
2194
2195
2196
2197 if (ns == &init_pid_ns)
2198 return (pid_t)fl->fl_pid;
2199
2200 rcu_read_lock();
2201 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2202 vnr = pid_nr_ns(pid, ns);
2203 rcu_read_unlock();
2204 return vnr;
2205}
2206
2207static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2208{
2209 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2210#if BITS_PER_LONG == 32
2211
2212
2213
2214
2215 if (fl->fl_start > OFFT_OFFSET_MAX)
2216 return -EOVERFLOW;
2217 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2218 return -EOVERFLOW;
2219#endif
2220 flock->l_start = fl->fl_start;
2221 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2222 fl->fl_end - fl->fl_start + 1;
2223 flock->l_whence = 0;
2224 flock->l_type = fl->fl_type;
2225 return 0;
2226}
2227
2228#if BITS_PER_LONG == 32
2229static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2230{
2231 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2232 flock->l_start = fl->fl_start;
2233 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2234 fl->fl_end - fl->fl_start + 1;
2235 flock->l_whence = 0;
2236 flock->l_type = fl->fl_type;
2237}
2238#endif
2239
2240
2241
2242
2243int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2244{
2245 struct file_lock *fl;
2246 int error;
2247
2248 fl = locks_alloc_lock();
2249 if (fl == NULL)
2250 return -ENOMEM;
2251 error = -EINVAL;
2252 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2253 goto out;
2254
2255 error = flock_to_posix_lock(filp, fl, flock);
2256 if (error)
2257 goto out;
2258
2259 if (cmd == F_OFD_GETLK) {
2260 error = -EINVAL;
2261 if (flock->l_pid != 0)
2262 goto out;
2263
2264 fl->fl_flags |= FL_OFDLCK;
2265 fl->fl_owner = filp;
2266 }
2267
2268 error = vfs_test_lock(filp, fl);
2269 if (error)
2270 goto out;
2271
2272 flock->l_type = fl->fl_type;
2273 if (fl->fl_type != F_UNLCK) {
2274 error = posix_lock_to_flock(flock, fl);
2275 if (error)
2276 goto out;
2277 }
2278out:
2279 locks_free_lock(fl);
2280 return error;
2281}
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2317{
2318 if (filp->f_op->lock)
2319 return filp->f_op->lock(filp, cmd, fl);
2320 else
2321 return posix_lock_file(filp, fl, conf);
2322}
2323EXPORT_SYMBOL_GPL(vfs_lock_file);
2324
2325static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2326 struct file_lock *fl)
2327{
2328 int error;
2329
2330 error = security_file_lock(filp, fl->fl_type);
2331 if (error)
2332 return error;
2333
2334 for (;;) {
2335 error = vfs_lock_file(filp, cmd, fl, NULL);
2336 if (error != FILE_LOCK_DEFERRED)
2337 break;
2338 error = wait_event_interruptible(fl->fl_wait,
2339 list_empty(&fl->fl_blocked_member));
2340 if (error)
2341 break;
2342 }
2343 locks_delete_block(fl);
2344
2345 return error;
2346}
2347
2348
2349static int
2350check_fmode_for_setlk(struct file_lock *fl)
2351{
2352 switch (fl->fl_type) {
2353 case F_RDLCK:
2354 if (!(fl->fl_file->f_mode & FMODE_READ))
2355 return -EBADF;
2356 break;
2357 case F_WRLCK:
2358 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2359 return -EBADF;
2360 }
2361 return 0;
2362}
2363
2364
2365
2366
2367int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2368 struct flock *flock)
2369{
2370 struct file_lock *file_lock = locks_alloc_lock();
2371 struct inode *inode = locks_inode(filp);
2372 struct file *f;
2373 int error;
2374
2375 if (file_lock == NULL)
2376 return -ENOLCK;
2377
2378 error = flock_to_posix_lock(filp, file_lock, flock);
2379 if (error)
2380 goto out;
2381
2382 error = check_fmode_for_setlk(file_lock);
2383 if (error)
2384 goto out;
2385
2386
2387
2388
2389
2390 switch (cmd) {
2391 case F_OFD_SETLK:
2392 error = -EINVAL;
2393 if (flock->l_pid != 0)
2394 goto out;
2395
2396 cmd = F_SETLK;
2397 file_lock->fl_flags |= FL_OFDLCK;
2398 file_lock->fl_owner = filp;
2399 break;
2400 case F_OFD_SETLKW:
2401 error = -EINVAL;
2402 if (flock->l_pid != 0)
2403 goto out;
2404
2405 cmd = F_SETLKW;
2406 file_lock->fl_flags |= FL_OFDLCK;
2407 file_lock->fl_owner = filp;
2408 fallthrough;
2409 case F_SETLKW:
2410 file_lock->fl_flags |= FL_SLEEP;
2411 }
2412
2413 error = do_lock_file_wait(filp, cmd, file_lock);
2414
2415
2416
2417
2418
2419
2420 if (!error && file_lock->fl_type != F_UNLCK &&
2421 !(file_lock->fl_flags & FL_OFDLCK)) {
2422 struct files_struct *files = current->files;
2423
2424
2425
2426
2427
2428 spin_lock(&files->file_lock);
2429 f = files_lookup_fd_locked(files, fd);
2430 spin_unlock(&files->file_lock);
2431 if (f != filp) {
2432 file_lock->fl_type = F_UNLCK;
2433 error = do_lock_file_wait(filp, cmd, file_lock);
2434 WARN_ON_ONCE(error);
2435 error = -EBADF;
2436 }
2437 }
2438out:
2439 trace_fcntl_setlk(inode, file_lock, error);
2440 locks_free_lock(file_lock);
2441 return error;
2442}
2443
2444#if BITS_PER_LONG == 32
2445
2446
2447
2448int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2449{
2450 struct file_lock *fl;
2451 int error;
2452
2453 fl = locks_alloc_lock();
2454 if (fl == NULL)
2455 return -ENOMEM;
2456
2457 error = -EINVAL;
2458 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2459 goto out;
2460
2461 error = flock64_to_posix_lock(filp, fl, flock);
2462 if (error)
2463 goto out;
2464
2465 if (cmd == F_OFD_GETLK) {
2466 error = -EINVAL;
2467 if (flock->l_pid != 0)
2468 goto out;
2469
2470 cmd = F_GETLK64;
2471 fl->fl_flags |= FL_OFDLCK;
2472 fl->fl_owner = filp;
2473 }
2474
2475 error = vfs_test_lock(filp, fl);
2476 if (error)
2477 goto out;
2478
2479 flock->l_type = fl->fl_type;
2480 if (fl->fl_type != F_UNLCK)
2481 posix_lock_to_flock64(flock, fl);
2482
2483out:
2484 locks_free_lock(fl);
2485 return error;
2486}
2487
2488
2489
2490
2491int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2492 struct flock64 *flock)
2493{
2494 struct file_lock *file_lock = locks_alloc_lock();
2495 struct file *f;
2496 int error;
2497
2498 if (file_lock == NULL)
2499 return -ENOLCK;
2500
2501 error = flock64_to_posix_lock(filp, file_lock, flock);
2502 if (error)
2503 goto out;
2504
2505 error = check_fmode_for_setlk(file_lock);
2506 if (error)
2507 goto out;
2508
2509
2510
2511
2512
2513 switch (cmd) {
2514 case F_OFD_SETLK:
2515 error = -EINVAL;
2516 if (flock->l_pid != 0)
2517 goto out;
2518
2519 cmd = F_SETLK64;
2520 file_lock->fl_flags |= FL_OFDLCK;
2521 file_lock->fl_owner = filp;
2522 break;
2523 case F_OFD_SETLKW:
2524 error = -EINVAL;
2525 if (flock->l_pid != 0)
2526 goto out;
2527
2528 cmd = F_SETLKW64;
2529 file_lock->fl_flags |= FL_OFDLCK;
2530 file_lock->fl_owner = filp;
2531 fallthrough;
2532 case F_SETLKW64:
2533 file_lock->fl_flags |= FL_SLEEP;
2534 }
2535
2536 error = do_lock_file_wait(filp, cmd, file_lock);
2537
2538
2539
2540
2541
2542
2543 if (!error && file_lock->fl_type != F_UNLCK &&
2544 !(file_lock->fl_flags & FL_OFDLCK)) {
2545 struct files_struct *files = current->files;
2546
2547
2548
2549
2550
2551 spin_lock(&files->file_lock);
2552 f = files_lookup_fd_locked(files, fd);
2553 spin_unlock(&files->file_lock);
2554 if (f != filp) {
2555 file_lock->fl_type = F_UNLCK;
2556 error = do_lock_file_wait(filp, cmd, file_lock);
2557 WARN_ON_ONCE(error);
2558 error = -EBADF;
2559 }
2560 }
2561out:
2562 locks_free_lock(file_lock);
2563 return error;
2564}
2565#endif
2566
2567
2568
2569
2570
2571
2572void locks_remove_posix(struct file *filp, fl_owner_t owner)
2573{
2574 int error;
2575 struct inode *inode = locks_inode(filp);
2576 struct file_lock lock;
2577 struct file_lock_context *ctx;
2578
2579
2580
2581
2582
2583
2584 ctx = smp_load_acquire(&inode->i_flctx);
2585 if (!ctx || list_empty(&ctx->flc_posix))
2586 return;
2587
2588 locks_init_lock(&lock);
2589 lock.fl_type = F_UNLCK;
2590 lock.fl_flags = FL_POSIX | FL_CLOSE;
2591 lock.fl_start = 0;
2592 lock.fl_end = OFFSET_MAX;
2593 lock.fl_owner = owner;
2594 lock.fl_pid = current->tgid;
2595 lock.fl_file = filp;
2596 lock.fl_ops = NULL;
2597 lock.fl_lmops = NULL;
2598
2599 error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2600
2601 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2602 lock.fl_ops->fl_release_private(&lock);
2603 trace_locks_remove_posix(inode, &lock, error);
2604}
2605EXPORT_SYMBOL(locks_remove_posix);
2606
2607
2608static void
2609locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2610{
2611 struct file_lock fl;
2612 struct inode *inode = locks_inode(filp);
2613
2614 if (list_empty(&flctx->flc_flock))
2615 return;
2616
2617 flock_make_lock(filp, LOCK_UN, &fl);
2618 fl.fl_flags |= FL_CLOSE;
2619
2620 if (filp->f_op->flock)
2621 filp->f_op->flock(filp, F_SETLKW, &fl);
2622 else
2623 flock_lock_inode(inode, &fl);
2624
2625 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2626 fl.fl_ops->fl_release_private(&fl);
2627}
2628
2629
2630static void
2631locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2632{
2633 struct file_lock *fl, *tmp;
2634 LIST_HEAD(dispose);
2635
2636 if (list_empty(&ctx->flc_lease))
2637 return;
2638
2639 percpu_down_read(&file_rwsem);
2640 spin_lock(&ctx->flc_lock);
2641 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2642 if (filp == fl->fl_file)
2643 lease_modify(fl, F_UNLCK, &dispose);
2644 spin_unlock(&ctx->flc_lock);
2645 percpu_up_read(&file_rwsem);
2646
2647 locks_dispose_list(&dispose);
2648}
2649
2650
2651
2652
2653void locks_remove_file(struct file *filp)
2654{
2655 struct file_lock_context *ctx;
2656
2657 ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2658 if (!ctx)
2659 return;
2660
2661
2662 locks_remove_posix(filp, filp);
2663
2664
2665 locks_remove_flock(filp, ctx);
2666
2667
2668 locks_remove_lease(filp, ctx);
2669
2670 spin_lock(&ctx->flc_lock);
2671 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2672 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2673 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2674 spin_unlock(&ctx->flc_lock);
2675}
2676
2677
2678
2679
2680
2681
2682
2683
2684int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2685{
2686 if (filp->f_op->lock)
2687 return filp->f_op->lock(filp, F_CANCELLK, fl);
2688 return 0;
2689}
2690EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2691
2692#ifdef CONFIG_PROC_FS
2693#include <linux/proc_fs.h>
2694#include <linux/seq_file.h>
2695
2696struct locks_iterator {
2697 int li_cpu;
2698 loff_t li_pos;
2699};
2700
2701static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2702 loff_t id, char *pfx, int repeat)
2703{
2704 struct inode *inode = NULL;
2705 unsigned int fl_pid;
2706 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2707 int type;
2708
2709 fl_pid = locks_translate_pid(fl, proc_pidns);
2710
2711
2712
2713
2714
2715
2716 if (fl->fl_file != NULL)
2717 inode = locks_inode(fl->fl_file);
2718
2719 seq_printf(f, "%lld: ", id);
2720
2721 if (repeat)
2722 seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
2723
2724 if (IS_POSIX(fl)) {
2725 if (fl->fl_flags & FL_ACCESS)
2726 seq_puts(f, "ACCESS");
2727 else if (IS_OFDLCK(fl))
2728 seq_puts(f, "OFDLCK");
2729 else
2730 seq_puts(f, "POSIX ");
2731
2732 seq_printf(f, " %s ",
2733 (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
2734 } else if (IS_FLOCK(fl)) {
2735 seq_puts(f, "FLOCK ADVISORY ");
2736 } else if (IS_LEASE(fl)) {
2737 if (fl->fl_flags & FL_DELEG)
2738 seq_puts(f, "DELEG ");
2739 else
2740 seq_puts(f, "LEASE ");
2741
2742 if (lease_breaking(fl))
2743 seq_puts(f, "BREAKING ");
2744 else if (fl->fl_file)
2745 seq_puts(f, "ACTIVE ");
2746 else
2747 seq_puts(f, "BREAKER ");
2748 } else {
2749 seq_puts(f, "UNKNOWN UNKNOWN ");
2750 }
2751 type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2752
2753 seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
2754 (type == F_RDLCK) ? "READ" : "UNLCK");
2755 if (inode) {
2756
2757 seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
2758 MAJOR(inode->i_sb->s_dev),
2759 MINOR(inode->i_sb->s_dev), inode->i_ino);
2760 } else {
2761 seq_printf(f, "%d <none>:0 ", fl_pid);
2762 }
2763 if (IS_POSIX(fl)) {
2764 if (fl->fl_end == OFFSET_MAX)
2765 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2766 else
2767 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2768 } else {
2769 seq_puts(f, "0 EOF\n");
2770 }
2771}
2772
2773static struct file_lock *get_next_blocked_member(struct file_lock *node)
2774{
2775 struct file_lock *tmp;
2776
2777
2778 if (node == NULL || node->fl_blocker == NULL)
2779 return NULL;
2780
2781
2782 tmp = list_next_entry(node, fl_blocked_member);
2783 if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
2784 || tmp == node) {
2785 return NULL;
2786 }
2787
2788 return tmp;
2789}
2790
2791static int locks_show(struct seq_file *f, void *v)
2792{
2793 struct locks_iterator *iter = f->private;
2794 struct file_lock *cur, *tmp;
2795 struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
2796 int level = 0;
2797
2798 cur = hlist_entry(v, struct file_lock, fl_link);
2799
2800 if (locks_translate_pid(cur, proc_pidns) == 0)
2801 return 0;
2802
2803
2804
2805
2806
2807
2808 while (cur != NULL) {
2809 if (level)
2810 lock_get_status(f, cur, iter->li_pos, "-> ", level);
2811 else
2812 lock_get_status(f, cur, iter->li_pos, "", level);
2813
2814 if (!list_empty(&cur->fl_blocked_requests)) {
2815
2816 cur = list_first_entry_or_null(&cur->fl_blocked_requests,
2817 struct file_lock, fl_blocked_member);
2818 level++;
2819 } else {
2820
2821 tmp = get_next_blocked_member(cur);
2822
2823 while (tmp == NULL && cur->fl_blocker != NULL) {
2824 cur = cur->fl_blocker;
2825 level--;
2826 tmp = get_next_blocked_member(cur);
2827 }
2828 cur = tmp;
2829 }
2830 }
2831
2832 return 0;
2833}
2834
2835static void __show_fd_locks(struct seq_file *f,
2836 struct list_head *head, int *id,
2837 struct file *filp, struct files_struct *files)
2838{
2839 struct file_lock *fl;
2840
2841 list_for_each_entry(fl, head, fl_list) {
2842
2843 if (filp != fl->fl_file)
2844 continue;
2845 if (fl->fl_owner != files &&
2846 fl->fl_owner != filp)
2847 continue;
2848
2849 (*id)++;
2850 seq_puts(f, "lock:\t");
2851 lock_get_status(f, fl, *id, "", 0);
2852 }
2853}
2854
2855void show_fd_locks(struct seq_file *f,
2856 struct file *filp, struct files_struct *files)
2857{
2858 struct inode *inode = locks_inode(filp);
2859 struct file_lock_context *ctx;
2860 int id = 0;
2861
2862 ctx = smp_load_acquire(&inode->i_flctx);
2863 if (!ctx)
2864 return;
2865
2866 spin_lock(&ctx->flc_lock);
2867 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2868 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2869 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2870 spin_unlock(&ctx->flc_lock);
2871}
2872
2873static void *locks_start(struct seq_file *f, loff_t *pos)
2874 __acquires(&blocked_lock_lock)
2875{
2876 struct locks_iterator *iter = f->private;
2877
2878 iter->li_pos = *pos + 1;
2879 percpu_down_write(&file_rwsem);
2880 spin_lock(&blocked_lock_lock);
2881 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2882}
2883
2884static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2885{
2886 struct locks_iterator *iter = f->private;
2887
2888 ++iter->li_pos;
2889 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2890}
2891
2892static void locks_stop(struct seq_file *f, void *v)
2893 __releases(&blocked_lock_lock)
2894{
2895 spin_unlock(&blocked_lock_lock);
2896 percpu_up_write(&file_rwsem);
2897}
2898
2899static const struct seq_operations locks_seq_operations = {
2900 .start = locks_start,
2901 .next = locks_next,
2902 .stop = locks_stop,
2903 .show = locks_show,
2904};
2905
2906static int __init proc_locks_init(void)
2907{
2908 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2909 sizeof(struct locks_iterator), NULL);
2910 return 0;
2911}
2912fs_initcall(proc_locks_init);
2913#endif
2914
2915static int __init filelock_init(void)
2916{
2917 int i;
2918
2919 flctx_cache = kmem_cache_create("file_lock_ctx",
2920 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2921
2922 filelock_cache = kmem_cache_create("file_lock_cache",
2923 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2924
2925 for_each_possible_cpu(i) {
2926 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2927
2928 spin_lock_init(&fll->lock);
2929 INIT_HLIST_HEAD(&fll->hlist);
2930 }
2931
2932 lease_notifier_chain_init();
2933 return 0;
2934}
2935core_initcall(filelock_init);
2936