1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#include <linux/capability.h>
118#include <linux/file.h>
119#include <linux/fdtable.h>
120#include <linux/fs.h>
121#include <linux/init.h>
122#include <linux/security.h>
123#include <linux/slab.h>
124#include <linux/syscalls.h>
125#include <linux/time.h>
126#include <linux/rcupdate.h>
127#include <linux/pid_namespace.h>
128#include <linux/hashtable.h>
129#include <linux/percpu.h>
130
131#define CREATE_TRACE_POINTS
132#include <trace/events/filelock.h>
133
134#include <asm/uaccess.h>
135
/* Classify a lock by the fl_flags bits set on it. */
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
140
/*
 * A superblock mounted with MS_NOREMOTELOCK opts out of the generic
 * remote-lock handling; return false for files on such a mount.
 */
static inline bool is_remote_lock(struct file *filp)
{
	return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
}
145
146static bool lease_breaking(struct file_lock *fl)
147{
148 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
149}
150
151static int target_leasetype(struct file_lock *fl)
152{
153 if (fl->fl_flags & FL_UNLOCK_PENDING)
154 return F_UNLCK;
155 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
156 return F_RDLCK;
157 return fl->fl_type;
158}
159
/*
 * Lease tunables: whether leases are enabled at all, and how many seconds
 * a lease holder is given to release after a break is initiated.
 * NOTE(review): presumably exposed as sysctls elsewhere — not visible here.
 */
int leases_enable = 1;
int lease_break_time = 45;
162
163
164
165
166
167
168
169
170
/*
 * Per-cpu list of all active file locks, each protected by its own
 * spinlock.  file_rwsem is held for read around per-cpu insert/delete
 * (see locks_insert_global_locks()); a writer can thus obtain a stable
 * view of all lists at once.
 */
struct file_lock_list_struct {
	spinlock_t lock;
	struct hlist_head hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
177
178
179
180
181
182
183
184
185
186
187
188
/*
 * Hash of blocked POSIX waiters, keyed by lock owner (posix_owner_key()),
 * used by the deadlock detector to follow wait-for chains.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/* Protects blocked_hash and all fl_block lists / fl_next pointers. */
static DEFINE_SPINLOCK(blocked_lock_lock);

/* Slab caches for file_lock_context and file_lock objects. */
static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
213
/*
 * Return inode->i_flctx, allocating and installing it on first use.
 * For F_UNLCK requests nothing is allocated (there is nothing to unlock
 * if no context exists yet).  Returns NULL on allocation failure or on
 * an F_UNLCK request against an inode with no context.
 */
static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's still NULL.  If we raced with
	 * another task and lost, free ours and use the winner's context.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
245
/* Dump every lock on @list to the log, tagged with @list_type (debug aid). */
static void
locks_dump_ctx_list(struct list_head *list, char *list_type)
{
	struct file_lock *fl;

	list_for_each_entry(fl, list, fl_list) {
		pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
	}
}
255
/*
 * Warn (and dump the offending locks) if any lock list on the inode's
 * context is non-empty — all locks should be gone by the time the
 * context is freed.
 */
static void
locks_check_ctx_lists(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(!list_empty(&ctx->flc_flock) ||
		     !list_empty(&ctx->flc_posix) ||
		     !list_empty(&ctx->flc_lease))) {
		pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
			inode->i_ino);
		locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
		locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
		locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
	}
}
272
/*
 * Free the inode's lock context (called at inode teardown).  Most inodes
 * never take a lock, hence the unlikely().  Any leftover locks indicate
 * a leak and are reported before the context is freed.
 */
void
locks_free_lock_context(struct inode *inode)
{
	struct file_lock_context *ctx = inode->i_flctx;

	if (unlikely(ctx)) {
		locks_check_ctx_lists(inode);
		kmem_cache_free(flctx_cache, ctx);
	}
}
283
/* Initialize the list heads and wait queue embedded in a file_lock. */
static void locks_init_lock_heads(struct file_lock *fl)
{
	INIT_HLIST_NODE(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_list);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
}
291
292
/* Allocate a zeroed, list-initialized file_lock; NULL on allocation failure. */
struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);

	if (fl)
		locks_init_lock_heads(fl);

	return fl;
}
EXPORT_SYMBOL_GPL(locks_alloc_lock);
303
/*
 * Release filesystem/lock-manager private state attached to @fl: invoke
 * the fl_release_private callback if any, drop the lock manager's owner
 * reference, and clear both ops pointers so a second call is a no-op.
 */
void locks_release_private(struct file_lock *fl)
{
	if (fl->fl_ops) {
		if (fl->fl_ops->fl_release_private)
			fl->fl_ops->fl_release_private(fl);
		fl->fl_ops = NULL;
	}

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_put_owner) {
			fl->fl_lmops->lm_put_owner(fl->fl_owner);
			fl->fl_owner = NULL;
		}
		fl->fl_lmops = NULL;
	}
}
EXPORT_SYMBOL_GPL(locks_release_private);
321
322
/*
 * Free a file_lock.  The lock must be fully unlinked: no waiters sleeping
 * on it, not on any context or block list, not in the global lists.
 */
void locks_free_lock(struct file_lock *fl)
{
	BUG_ON(waitqueue_active(&fl->fl_wait));
	BUG_ON(!list_empty(&fl->fl_list));
	BUG_ON(!list_empty(&fl->fl_block));
	BUG_ON(!hlist_unhashed(&fl->fl_link));

	locks_release_private(fl);
	kmem_cache_free(filelock_cache, fl);
}
EXPORT_SYMBOL(locks_free_lock);
334
335static void
336locks_dispose_list(struct list_head *dispose)
337{
338 struct file_lock *fl;
339
340 while (!list_empty(dispose)) {
341 fl = list_first_entry(dispose, struct file_lock, fl_list);
342 list_del_init(&fl->fl_list);
343 locks_free_lock(fl);
344 }
345}
346
/* Zero a caller-provided file_lock and initialize its embedded heads. */
void locks_init_lock(struct file_lock *fl)
{
	memset(fl, 0, sizeof(struct file_lock));
	locks_init_lock_heads(fl);
}

EXPORT_SYMBOL(locks_init_lock);
354
355
356
357
/*
 * Initialize @new with the fields of @fl needed to report a conflicting
 * lock back to a caller.  fl_file and fl_ops are deliberately not copied;
 * an owner reference is taken via lm_get_owner when the lock manager
 * provides one.
 */
void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = NULL;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_lmops = fl->fl_lmops;
	new->fl_ops = NULL;

	if (fl->fl_lmops) {
		if (fl->fl_lmops->lm_get_owner)
			fl->fl_lmops->lm_get_owner(fl->fl_owner);
	}
}
EXPORT_SYMBOL(locks_copy_conflock);
376
/*
 * Full copy of @fl into @new, including fl_file and fl_ops, giving the
 * filesystem a chance to duplicate its private state via fl_copy_lock.
 * @new must not already carry fl_ops state (it would be leaked).
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	/* "new" must be a freshly-initialized lock */
	WARN_ON_ONCE(new->fl_ops);

	locks_copy_conflock(new, fl);

	new->fl_file = fl->fl_file;
	new->fl_ops = fl->fl_ops;

	if (fl->fl_ops) {
		if (fl->fl_ops->fl_copy_lock)
			fl->fl_ops->fl_copy_lock(new, fl);
	}
}

EXPORT_SYMBOL(locks_copy_lock);
394
395static inline int flock_translate_cmd(int cmd) {
396 if (cmd & LOCK_MAND)
397 return cmd & (LOCK_MAND | LOCK_RW);
398 switch (cmd) {
399 case LOCK_SH:
400 return F_RDLCK;
401 case LOCK_EX:
402 return F_WRLCK;
403 case LOCK_UN:
404 return F_UNLCK;
405 }
406 return -EINVAL;
407}
408
409
/*
 * Allocate and fill a whole-file FL_FLOCK lock for @filp from a flock(2)
 * command.  flock locks are owned by the open file description, so
 * fl_owner is the struct file itself.  Returns ERR_PTR on bad command or
 * allocation failure.
 */
static struct file_lock *
flock_make_lock(struct file *filp, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);

	if (type < 0)
		return ERR_PTR(type);

	fl = locks_alloc_lock();
	if (fl == NULL)
		return ERR_PTR(-ENOMEM);

	fl->fl_file = filp;
	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;		/* whole file */

	return fl;
}
432
433static int assign_type(struct file_lock *fl, long type)
434{
435 switch (type) {
436 case F_RDLCK:
437 case F_WRLCK:
438 case F_UNLCK:
439 fl->fl_type = type;
440 break;
441 default:
442 return -EINVAL;
443 }
444 return 0;
445}
446
/*
 * Convert a userspace struct flock64 into an FL_POSIX file_lock on @fl.
 * Resolves l_whence to an absolute start, then computes the inclusive
 * [fl_start, fl_end] range; l_len == 0 means "to end of file" and a
 * negative l_len describes the range ending just before l_start.
 * Returns 0, -EINVAL, or -EOVERFLOW on offset overflow.
 */
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	switch (l->l_whence) {
	case SEEK_SET:
		fl->fl_start = 0;
		break;
	case SEEK_CUR:
		fl->fl_start = filp->f_pos;
		break;
	case SEEK_END:
		fl->fl_start = i_size_read(file_inode(filp));
		break;
	default:
		return -EINVAL;
	}
	if (l->l_start > OFFSET_MAX - fl->fl_start)
		return -EOVERFLOW;
	fl->fl_start += l->l_start;
	if (fl->fl_start < 0)
		return -EINVAL;

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	if (l->l_len > 0) {
		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
		fl->fl_end = fl->fl_start + l->l_len - 1;

	} else if (l->l_len < 0) {
		if (fl->fl_start + l->l_len < 0)
			return -EINVAL;
		fl->fl_end = fl->fl_start - 1;
		fl->fl_start += l->l_len;
	} else
		fl->fl_end = OFFSET_MAX;	/* len 0: lock to EOF */

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_ops = NULL;
	fl->fl_lmops = NULL;

	return assign_type(fl, l->l_type);
}
493
494
495
496
497static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
498 struct flock *l)
499{
500 struct flock64 ll = {
501 .l_type = l->l_type,
502 .l_whence = l->l_whence,
503 .l_start = l->l_start,
504 .l_len = l->l_len,
505 };
506
507 return flock64_to_posix_lock(filp, fl, &ll);
508}
509
510
/*
 * lm_break for userspace leases: signal the holder (SIGIO/POLL_MSG via
 * fasync) and keep the lease on the list (return false).
 */
static bool
lease_break_callback(struct file_lock *fl)
{
	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
	return false;
}
517
/*
 * lm_setup for userspace leases: attach the caller-provided fasync entry
 * (so the holder gets SIGIO on a break) and direct the signal to the
 * current task.
 */
static void
lease_setup(struct file_lock *fl, void **priv)
{
	struct file *filp = fl->fl_file;
	struct fasync_struct *fa = *priv;

	/*
	 * fasync_insert_entry() returns the old entry if any. If there was
	 * no old entry, then it used "priv" and inserted it into the fasync
	 * list. Clear the pointer to indicate that it shouldn't be freed.
	 */
	if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
		*priv = NULL;

	__f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
}
534
/* Lock-manager callbacks used for ordinary (userspace fcntl) leases. */
static const struct lock_manager_operations lease_manager_ops = {
	.lm_break = lease_break_callback,
	.lm_change = lease_modify,
	.lm_setup = lease_setup,
};
540
541
542
543
/*
 * Initialize @fl as a whole-file FL_LEASE of the given type on @filp,
 * managed by lease_manager_ops.  Returns -EINVAL for a bad type.
 */
static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
	if (assign_type(fl, type) != 0)
		return -EINVAL;

	fl->fl_owner = filp;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_ops = NULL;
	fl->fl_lmops = &lease_manager_ops;
	return 0;
}
560
561
562static struct file_lock *lease_alloc(struct file *filp, long type)
563{
564 struct file_lock *fl = locks_alloc_lock();
565 int error = -ENOMEM;
566
567 if (fl == NULL)
568 return ERR_PTR(error);
569
570 error = lease_init(filp, type, fl);
571 if (error) {
572 locks_free_lock(fl);
573 return ERR_PTR(error);
574 }
575 return fl;
576}
577
578
579
580static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
581{
582 return ((fl1->fl_end >= fl2->fl_start) &&
583 (fl2->fl_end >= fl1->fl_start));
584}
585
586
587
588
589static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
590{
591 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
592 return fl2->fl_lmops == fl1->fl_lmops &&
593 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
594 return fl1->fl_owner == fl2->fl_owner;
595}
596
597
/*
 * Add @fl to this cpu's global lock list, recording which cpu's list it
 * went on so deletion can find it.  Caller holds file_rwsem for read.
 */
static void locks_insert_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);

	percpu_rwsem_assert_held(&file_rwsem);

	spin_lock(&fll->lock);
	fl->fl_link_cpu = smp_processor_id();
	hlist_add_head(&fl->fl_link, &fll->hlist);
	spin_unlock(&fll->lock);
}
609
610
/*
 * Remove @fl from the per-cpu list it was inserted on (fl_link_cpu).
 * Caller holds file_rwsem for read.
 */
static void locks_delete_global_locks(struct file_lock *fl)
{
	struct file_lock_list_struct *fll;

	percpu_rwsem_assert_held(&file_rwsem);

	/*
	 * Avoid taking the lock if the list is empty / the lock was never
	 * inserted.  fl_link_cpu is only meaningful when fl_link is hashed.
	 */
	if (hlist_unhashed(&fl->fl_link))
		return;

	fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
	spin_lock(&fll->lock);
	hlist_del_init(&fl->fl_link);
	spin_unlock(&fll->lock);
}
630
631static unsigned long
632posix_owner_key(struct file_lock *fl)
633{
634 if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
635 return fl->fl_lmops->lm_owner_key(fl);
636 return (unsigned long)fl->fl_owner;
637}
638
/* Hash a blocked waiter by owner so deadlock detection can find it. */
static void locks_insert_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
}
645
/* Remove a waiter from the blocked-owner hash. */
static void locks_delete_global_blocked(struct file_lock *waiter)
{
	lockdep_assert_held(&blocked_lock_lock);

	hash_del(&waiter->fl_link);
}
652
653
654
655
656
657
/*
 * Unlink @waiter from its blocker's fl_block list and the global blocked
 * hash, and clear its wait target.  Caller holds blocked_lock_lock.
 */
static void __locks_delete_block(struct file_lock *waiter)
{
	locks_delete_global_blocked(waiter);
	list_del_init(&waiter->fl_block);
	waiter->fl_next = NULL;
}
664
/* Locked wrapper around __locks_delete_block(). */
static void locks_delete_block(struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_delete_block(waiter);
	spin_unlock(&blocked_lock_lock);
}
671
672
673
674
675
676
677
678
679
680
681
/*
 * Queue @waiter behind @blocker.  Only POSIX (non-OFD) waiters go into
 * the global blocked hash, since only they participate in per-process
 * deadlock detection.  Caller holds blocked_lock_lock.
 */
static void __locks_insert_block(struct file_lock *blocker,
				 struct file_lock *waiter)
{
	BUG_ON(!list_empty(&waiter->fl_block));
	waiter->fl_next = blocker;
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
		locks_insert_global_blocked(waiter);
}
691
692
/* Locked wrapper around __locks_insert_block(). */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	spin_lock(&blocked_lock_lock);
	__locks_insert_block(blocker, waiter);
	spin_unlock(&blocked_lock_lock);
}
700
701
702
703
704
705
/*
 * Wake up processes blocked waiting for this lock: unlink each waiter and
 * either notify its lock manager or wake its wait queue.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	/*
	 * Avoid taking global lock if list is empty. This is safe since new
	 * blocked requests are only added to the list under the flc_lock, and
	 * the flc_lock is always held here. Note that removal from the
	 * fl_block list does not require the flc_lock, so we must recheck
	 * list_empty after acquiring the blocked_lock_lock.
	 */
	if (list_empty(&blocker->fl_block))
		return;

	spin_lock(&blocked_lock_lock);
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter;

		waiter = list_first_entry(&blocker->fl_block,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
			waiter->fl_lmops->lm_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
	spin_unlock(&blocked_lock_lock);
}
732
/*
 * Insert @fl before @before on a context list, pin the owning task's pid
 * (for /proc reporting in the right namespace), and add it to the global
 * lock list.
 */
static void
locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
{
	fl->fl_nspid = get_pid(task_tgid(current));
	list_add_tail(&fl->fl_list, before);
	locks_insert_global_locks(fl);
}
740
/*
 * Unlink @fl from its context list and the global list, drop the pinned
 * pid reference, and wake anything that was blocked on it.
 */
static void
locks_unlink_lock_ctx(struct file_lock *fl)
{
	locks_delete_global_locks(fl);
	list_del_init(&fl->fl_list);
	if (fl->fl_nspid) {
		put_pid(fl->fl_nspid);
		fl->fl_nspid = NULL;
	}
	locks_wake_up_blocks(fl);
}
752
/*
 * Unlink @fl and either queue it on @dispose for later freeing (so the
 * free happens outside the spinlocks) or free it immediately.
 */
static void
locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
{
	locks_unlink_lock_ctx(fl);
	if (dispose)
		list_add(&fl->fl_list, dispose);
	else
		locks_free_lock(fl);
}
762
763
764
765
766static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
767{
768 if (sys_fl->fl_type == F_WRLCK)
769 return 1;
770 if (caller_fl->fl_type == F_WRLCK)
771 return 1;
772 return 0;
773}
774
775
776
777
/*
 * POSIX conflict check: same-owner locks never conflict; otherwise the
 * ranges must overlap and the types must be incompatible.
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (posix_same_owner(caller_fl, sys_fl))
		return 0;

	return locks_overlap(caller_fl, sys_fl) &&
	       locks_conflict(caller_fl, sys_fl);
}
792
793
794
795
796static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
797{
798
799
800
801 if (caller_fl->fl_file == sys_fl->fl_file)
802 return (0);
803 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
804 return 0;
805
806 return (locks_conflict(caller_fl, sys_fl));
807}
808
/*
 * F_GETLK: report the first POSIX lock that would block @fl, copying it
 * into @fl (with the pid translated into the caller's namespace when
 * recorded), or set fl_type to F_UNLCK if none conflicts.
 */
void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;
	struct file_lock_context *ctx;
	struct inode *inode = locks_inode(filp);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix)) {
		fl->fl_type = F_UNLCK;
		return;
	}

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
		if (posix_locks_conflict(fl, cfl)) {
			locks_copy_conflock(fl, cfl);
			if (cfl->fl_nspid)
				fl->fl_pid = pid_vnr(cfl->fl_nspid);
			goto out;
		}
	}
	fl->fl_type = F_UNLCK;
out:
	spin_unlock(&ctx->flc_lock);
	return;
}
EXPORT_SYMBOL(posix_test_lock);
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
/* Bound on wait-for chain traversal, to cope with cycles we can't detect. */
#define MAX_DEADLK_ITERATIONS 10

/* Find the lock that the owner of @block_fl is itself blocked waiting on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
884
885
/*
 * Would granting caller_fl's wait on block_fl create a deadlock?  Walks
 * the owner wait-for chain up to MAX_DEADLK_ITERATIONS links looking for
 * the caller's own owner.  Caller holds blocked_lock_lock.
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	int i = 0;

	lockdep_assert_held(&blocked_lock_lock);

	/*
	 * This deadlock detector can't reasonably detect deadlocks with
	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
	 */
	if (IS_OFDLCK(caller_fl))
		return 0;

	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;
		if (posix_same_owner(caller_fl, block_fl))
			return 1;
	}
	return 0;
}
908
909
910
911
912
913
914
915
/*
 * Apply an FL_FLOCK request to @inode: remove any existing flock lock
 * held via the same struct file (a same-type request is a no-op), then
 * either unlock, or install the new lock if nothing conflicts, or block
 * (FL_SLEEP) / fail with -EAGAIN.  FL_ACCESS only probes for conflicts.
 */
static int flock_lock_inode(struct inode *inode, struct file_lock *request)
{
	struct file_lock *new_fl = NULL;
	struct file_lock *fl;
	struct file_lock_context *ctx;
	int error = 0;
	bool found = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx) {
		if (request->fl_type != F_UNLCK)
			return -ENOMEM;
		return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
	}

	/* pre-allocate outside the locks; only needed if we'll insert */
	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
		new_fl = locks_alloc_lock();
		if (!new_fl)
			return -ENOMEM;
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	if (request->fl_flags & FL_ACCESS)
		goto find_conflict;

	/* drop any existing lock held via this struct file */
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (request->fl_file != fl->fl_file)
			continue;
		if (request->fl_type == fl->fl_type)
			goto out;
		found = true;
		locks_delete_lock_ctx(fl, &dispose);
		break;
	}

	if (request->fl_type == F_UNLCK) {
		if ((request->fl_flags & FL_EXISTS) && !found)
			error = -ENOENT;
		goto out;
	}

find_conflict:
	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
		if (!flock_locks_conflict(request, fl))
			continue;
		error = -EAGAIN;
		if (!(request->fl_flags & FL_SLEEP))
			goto out;
		error = FILE_LOCK_DEFERRED;
		locks_insert_block(fl, request);
		goto out;
	}
	if (request->fl_flags & FL_ACCESS)
		goto out;
	locks_copy_lock(new_fl, request);
	locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
	new_fl = NULL;
	error = 0;

out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	if (new_fl)
		locks_free_lock(new_fl);
	locks_dispose_list(&dispose);
	return error;
}
985
/*
 * Apply a POSIX lock request to @inode: check conflicts (optionally
 * reporting the blocker via @conflock, or queueing the request when
 * FL_SLEEP), then merge/split the owner's existing locks to install the
 * new range.  Returns 0, FILE_LOCK_DEFERRED, or a -errno.
 */
static int posix_lock_inode(struct inode *inode, struct file_lock *request,
			    struct file_lock *conflock)
{
	struct file_lock *fl, *tmp;
	struct file_lock *new_fl = NULL;
	struct file_lock *new_fl2 = NULL;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock_context *ctx;
	int error;
	bool added = false;
	LIST_HEAD(dispose);

	ctx = locks_get_lock_context(inode, request->fl_type);
	if (!ctx)
		return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 *
	 * In some cases we can be sure, that no new locks will be needed
	 */
	if (!(request->fl_flags & FL_ACCESS) &&
	    (request->fl_type != F_UNLCK ||
	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
		new_fl = locks_alloc_lock();
		new_fl2 = locks_alloc_lock();
	}

	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);

	/*
	 * New lock request. Walk all POSIX locks and look for conflicts. If
	 * there are any, either return error or put the request on the
	 * blocker's list of waiters and the global blocked_hash.
	 */
	if (request->fl_type != F_UNLCK) {
		list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
			if (!posix_locks_conflict(request, fl))
				continue;
			if (conflock)
				locks_copy_conflock(conflock, fl);
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			/*
			 * Deadlock detection and insertion into the blocked
			 * locks list must be done while holding the same lock!
			 */
			error = -EDEADLK;
			spin_lock(&blocked_lock_lock);
			if (likely(!posix_locks_deadlock(request, fl))) {
				error = FILE_LOCK_DEFERRED;
				__locks_insert_block(fl, request);
			}
			spin_unlock(&blocked_lock_lock);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	/* Find the first old lock with the same owner as the new lock. */
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (posix_same_owner(request, fl))
			break;
	}

	/* Process locks with this owner. */
	list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
		if (!posix_same_owner(request, fl))
			break;

		/* Detect adjacent or overlapping regions (if same lock type). */
		if (request->fl_type == fl->fl_type) {
			/*
			 * In all comparisons of start vs end, use "start - 1"
			 * rather than "end + 1". If end is OFFSET_MAX,
			 * end + 1 will become negative.
			 */
			if (fl->fl_end < request->fl_start - 1)
				continue;
			/*
			 * If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start - 1 > request->fl_end)
				break;

			/*
			 * If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock_ctx(fl, &dispose);
				continue;
			}
			request = fl;
			added = true;
		} else {
			/*
			 * Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				continue;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = true;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/*
			 * If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/*
				 * The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock_ctx(fl, &dispose);
					continue;
				}
				/*
				 * Replace the old lock with new_fl, and
				 * remove the old one. It's safe to do the
				 * insert here since we know that we won't be
				 * using new_fl later, and that the lock is
				 * just replacing an existing lock.
				 */
				error = -ENOLCK;
				if (!new_fl)
					goto out;
				locks_copy_lock(new_fl, request);
				request = new_fl;
				new_fl = NULL;
				locks_insert_lock_ctx(request, &fl->fl_list);
				locks_delete_lock_ctx(fl, &dispose);
				added = true;
			}
		}
	}

	/*
	 * The above code only modifies existing locks in case of merging or
	 * replacing. If new lock(s) need to be inserted all modifications are
	 * done below this, so it's safe yet to bail out.
	 */
	error = -ENOLCK; /* "no luck" */
	if (right && left == right && !new_fl2)
		goto out;

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK) {
			if (request->fl_flags & FL_EXISTS)
				error = -ENOENT;
			goto out;
		}

		if (!new_fl) {
			error = -ENOLCK;
			goto out;
		}
		locks_copy_lock(new_fl, request);
		locks_insert_lock_ctx(new_fl, &fl->fl_list);
		fl = new_fl;
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/*
			 * The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock_ctx(left, &fl->fl_list);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
 out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	locks_dispose_list(&dispose);
	trace_posix_lock_inode(inode, request, error);

	return error;
}
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
/*
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Thin wrapper over posix_lock_inode() for the file's inode.
 */
int posix_lock_file(struct file *filp, struct file_lock *fl,
		    struct file_lock *conflock)
{
	return posix_lock_inode(locks_inode(filp), fl, conflock);
}
EXPORT_SYMBOL(posix_lock_file);
1222
1223
1224
1225
1226
1227
1228
1229
/*
 * Apply a POSIX lock, sleeping until it is granted, the wait is
 * interrupted by a signal, or a non-deferred error occurs.  On
 * interruption the request is removed from its blocker's list.
 */
static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep ();
	for (;;) {
		error = posix_lock_inode(inode, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the blocker wakes us */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;

		locks_delete_block(fl);
		break;
	}
	return error;
}
1247
1248#ifdef CONFIG_MANDATORY_FILE_LOCKING
1249
1250
1251
1252
1253
1254
1255
/*
 * locks_mandatory_locked - Check for an active lock
 * @file: the file to check
 *
 * Searches the inode's POSIX lock list for locks held by other owners.
 * Returns 0 if no locks held by others are present, -EAGAIN otherwise.
 * Used by mmap() on mandatory-locked files.
 */
int locks_mandatory_locked(struct file *file)
{
	int ret;
	struct inode *inode = locks_inode(file);
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty_careful(&ctx->flc_posix))
		return 0;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	spin_lock(&ctx->flc_lock);
	ret = 0;
	list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
		if (fl->fl_owner != current->files &&
		    fl->fl_owner != file) {
			ret = -EAGAIN;
			break;
		}
	}
	spin_unlock(&ctx->flc_lock);
	return ret;
}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
/*
 * locks_mandatory_area - Check for a conflicting lock
 * @inode:	the file to check
 * @filp:	how the file was opened (if it was)
 * @start:	first byte in the file to check
 * @end:	last byte in the file to check
 * @type:	F_WRLCK for a write check, F_RDLCK for a read check
 *
 * Checks whether mandatory locking rules permit an I/O on the byte range;
 * blocks (unless the file was opened O_NONBLOCK) until the conflicting
 * lock goes away or a signal arrives.
 */
int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
			 loff_t end, unsigned char type)
{
	struct file_lock fl;
	int error;
	bool sleep = false;

	locks_init_lock(&fl);
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		sleep = true;
	fl.fl_type = type;
	fl.fl_start = start;
	fl.fl_end = end;

	for (;;) {
		if (filp) {
			/* first check as the open file description owner */
			fl.fl_owner = filp;
			fl.fl_flags &= ~FL_SLEEP;
			error = posix_lock_inode(inode, &fl, NULL);
			if (!error)
				break;
		}

		if (sleep)
			fl.fl_flags |= FL_SLEEP;
		fl.fl_owner = current->files;
		error = posix_lock_inode(inode, &fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if (__mandatory_lock(inode))
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}

EXPORT_SYMBOL(locks_mandatory_area);
1343#endif
1344
/*
 * Clear the break-pending state being resolved by a change to @arg.
 * An unlock resolves both a pending unlock and a pending downgrade,
 * hence the deliberate fallthrough.
 */
static void lease_clear_pending(struct file_lock *fl, int arg)
{
	switch (arg) {
	case F_UNLCK:
		fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
	case F_RDLCK:
		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
	}
}
1355
1356
/*
 * lease_modify - downgrade or release a lease in response to a break.
 * Changes the lease's type to @arg; on F_UNLCK also tears down the
 * SIGIO/fasync plumbing and removes the lease (onto @dispose if given).
 * Caller holds the flc_lock.
 */
int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
	int error = assign_type(fl, arg);

	if (error)
		return error;
	lease_clear_pending(fl, arg);
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		f_delown(filp);
		filp->f_owner.signum = 0;
		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
		if (fl->fl_fasync != NULL) {
			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
			fl->fl_fasync = NULL;
		}
		locks_delete_lock_ctx(fl, dispose);
	}
	return 0;
}
EXPORT_SYMBOL(lease_modify);
1380
1381static bool past_time(unsigned long then)
1382{
1383 if (!then)
1384
1385 return false;
1386 return time_after(jiffies, then);
1387}
1388
/*
 * Enforce expired break deadlines on the inode's leases: downgrade those
 * whose downgrade timer fired, remove those whose break timer fired.
 * Caller holds the flc_lock; removed leases go onto @dispose.
 */
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl, *tmp;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
		trace_time_out_leases(inode, fl);
		if (past_time(fl->fl_downgrade_time))
			lease_modify(fl, F_RDLCK, dispose);
		if (past_time(fl->fl_break_time))
			lease_modify(fl, F_UNLCK, dispose);
	}
}
1404
1405static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1406{
1407 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1408 return false;
1409 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1410 return false;
1411 return locks_conflict(breaker, lease);
1412}
1413
/*
 * Does any lease on @inode conflict with @breaker?  Caller holds the
 * flc_lock.
 */
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
	struct file_lock_context *ctx = inode->i_flctx;
	struct file_lock *fl;

	lockdep_assert_held(&ctx->flc_lock);

	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (leases_conflict(fl, breaker))
			return true;
	}
	return false;
}
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1443{
1444 int error = 0;
1445 struct file_lock_context *ctx;
1446 struct file_lock *new_fl, *fl, *tmp;
1447 unsigned long break_time;
1448 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1449 LIST_HEAD(dispose);
1450
1451 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1452 if (IS_ERR(new_fl))
1453 return PTR_ERR(new_fl);
1454 new_fl->fl_flags = type;
1455
1456
1457 ctx = smp_load_acquire(&inode->i_flctx);
1458 if (!ctx) {
1459 WARN_ON_ONCE(1);
1460 return error;
1461 }
1462
1463 percpu_down_read_preempt_disable(&file_rwsem);
1464 spin_lock(&ctx->flc_lock);
1465
1466 time_out_leases(inode, &dispose);
1467
1468 if (!any_leases_conflict(inode, new_fl))
1469 goto out;
1470
1471 break_time = 0;
1472 if (lease_break_time > 0) {
1473 break_time = jiffies + lease_break_time * HZ;
1474 if (break_time == 0)
1475 break_time++;
1476 }
1477
1478 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1479 if (!leases_conflict(fl, new_fl))
1480 continue;
1481 if (want_write) {
1482 if (fl->fl_flags & FL_UNLOCK_PENDING)
1483 continue;
1484 fl->fl_flags |= FL_UNLOCK_PENDING;
1485 fl->fl_break_time = break_time;
1486 } else {
1487 if (lease_breaking(fl))
1488 continue;
1489 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1490 fl->fl_downgrade_time = break_time;
1491 }
1492 if (fl->fl_lmops->lm_break(fl))
1493 locks_delete_lock_ctx(fl, &dispose);
1494 }
1495
1496 if (list_empty(&ctx->flc_lease))
1497 goto out;
1498
1499 if (mode & O_NONBLOCK) {
1500 trace_break_lease_noblock(inode, new_fl);
1501 error = -EWOULDBLOCK;
1502 goto out;
1503 }
1504
1505restart:
1506 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1507 break_time = fl->fl_break_time;
1508 if (break_time != 0)
1509 break_time -= jiffies;
1510 if (break_time == 0)
1511 break_time++;
1512 locks_insert_block(fl, new_fl);
1513 trace_break_lease_block(inode, new_fl);
1514 spin_unlock(&ctx->flc_lock);
1515 percpu_up_read_preempt_enable(&file_rwsem);
1516
1517 locks_dispose_list(&dispose);
1518 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1519 !new_fl->fl_next, break_time);
1520
1521 percpu_down_read_preempt_disable(&file_rwsem);
1522 spin_lock(&ctx->flc_lock);
1523 trace_break_lease_unblock(inode, new_fl);
1524 locks_delete_block(new_fl);
1525 if (error >= 0) {
1526
1527
1528
1529
1530 if (error == 0)
1531 time_out_leases(inode, &dispose);
1532 if (any_leases_conflict(inode, new_fl))
1533 goto restart;
1534 error = 0;
1535 }
1536out:
1537 spin_unlock(&ctx->flc_lock);
1538 percpu_up_read_preempt_enable(&file_rwsem);
1539 locks_dispose_list(&dispose);
1540 locks_free_lock(new_fl);
1541 return error;
1542}
1543
1544EXPORT_SYMBOL(__break_lease);
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
/**
 * lease_get_mtime - update modified time of an inode with exclusive lease
 * @inode: the inode
 * @time: pointer to a timespec which contains the last modified time
 *
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases.  The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	bool has_lease = false;
	struct file_lock_context *ctx;
	struct file_lock *fl;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		spin_lock(&ctx->flc_lock);
		fl = list_first_entry_or_null(&ctx->flc_lease,
					      struct file_lock, fl_list);
		if (fl && (fl->fl_type == F_WRLCK))
			has_lease = true;
		spin_unlock(&ctx->flc_lock);
	}

	if (has_lease)
		*time = current_time(inode);
	else
		*time = inode->i_mtime;
}

EXPORT_SYMBOL(lease_get_mtime);
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
/*
 * fcntl_getlease - F_GETLEASE: report the type of lease @filp holds on
 * its inode (after timing out expired breaks), or F_UNLCK if none.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int type = F_UNLCK;
	LIST_HEAD(dispose);

	ctx = smp_load_acquire(&inode->i_flctx);
	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
		percpu_down_read_preempt_disable(&file_rwsem);
		spin_lock(&ctx->flc_lock);
		time_out_leases(inode, &dispose);
		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
			if (fl->fl_file != filp)
				continue;
			type = target_leasetype(fl);
			break;
		}
		spin_unlock(&ctx->flc_lock);
		percpu_up_read_preempt_enable(&file_rwsem);

		locks_dispose_list(&dispose);
	}
	return type;
}
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
/*
 * check_conflicting_open - see if the given dentry points to a file that has
 *			    an existing open that would conflict with the
 *			    lease type we are trying to acquire.
 * @dentry:	dentry to check
 * @arg:	lease type being requested (F_RDLCK or F_WRLCK)
 * @flags:	lock flags of the lease being set up
 *
 * Must be called with the relevant flc_lock held.
 */
static int
check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
{
	int ret = 0;
	struct inode *inode = dentry->d_inode;

	/* layout leases are exempt from open-conflict checks */
	if (flags & FL_LAYOUT)
		return 0;

	/*
	 * A read lease conflicts with any open for write.  d_real_inode()
	 * is used so that on stacked filesystems (overlayfs) the write
	 * count of the underlying inode is consulted.
	 * NOTE(review): the i_count check below still uses dentry->d_inode;
	 * confirm whether that is intentional for stacked filesystems.
	 */
	if ((arg == F_RDLCK) &&
	    (atomic_read(&d_real_inode(dentry)->i_writecount) > 0))
		return -EAGAIN;

	/* a write lease conflicts with any other reference to the file */
	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
	    (atomic_read(&inode->i_count) > 1)))
		ret = -EAGAIN;

	return ret;
}
1659
/*
 * generic_add_lease - attempt to set up a new lease (or modify an existing
 * one) of type @arg on @filp.
 *
 * On success with a newly inserted lease, *flp is set to NULL to tell the
 * caller that ownership of the lock was consumed.  Must not be called with
 * arg == F_UNLCK (that is generic_delete_lease's job).
 */
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
	struct file_lock *fl, *my_fl = NULL, *lease;
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct file_lock_context *ctx;
	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
	int error;
	LIST_HEAD(dispose);

	lease = *flp;
	trace_generic_add_lease(inode, lease);

	/* Note that arg is never F_UNLCK here */
	ctx = locks_get_lock_context(inode, arg);
	if (!ctx)
		return -ENOMEM;

	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
	if (is_deleg && !inode_trylock(inode))
		return -EAGAIN;

	if (is_deleg && arg == F_WRLCK) {
		/* write delegations are not currently supported */
		inode_unlock(inode);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* lock ordering: file_rwsem (read) nests outside flc_lock */
	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	time_out_leases(inode, &dispose);
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error)
		goto out;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	error = -EAGAIN;
	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
		if (fl->fl_file == filp &&
		    fl->fl_owner == lease->fl_owner) {
			my_fl = fl;
			continue;
		}

		/*
		 * No exclusive leases if someone else has a lease on
		 * this file:
		 */
		if (arg == F_WRLCK)
			goto out;
		/*
		 * Modifying our existing lease is OK, but no getting a
		 * new lease if someone else is unlocking:
		 */
		if (fl->fl_flags & FL_UNLOCK_PENDING)
			goto out;
	}

	/* we already hold a lease on this filp/owner: just modify it */
	if (my_fl != NULL) {
		lease = my_fl;
		error = lease->fl_lmops->lm_change(lease, arg, &dispose);
		if (error)
			goto out;
		goto out_setup;
	}

	error = -EINVAL;
	if (!leases_enable)
		goto out;

	locks_insert_lock_ctx(lease, &ctx->flc_lease);
	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * The barrier ensures the insertion of the lock precedes the
	 * re-check below.
	 */
	smp_mb();
	error = check_conflicting_open(dentry, arg, lease->fl_flags);
	if (error) {
		locks_unlink_lock_ctx(lease);
		goto out;
	}

out_setup:
	if (lease->fl_lmops->lm_setup)
		lease->fl_lmops->lm_setup(lease, priv);
out:
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);
	locks_dispose_list(&dispose);
	if (is_deleg)
		inode_unlock(inode);
	/* a NULL *flp tells the caller that we consumed the lock */
	if (!error && !my_fl)
		*flp = NULL;
	return error;
}
1776
1777static int generic_delete_lease(struct file *filp, void *owner)
1778{
1779 int error = -EAGAIN;
1780 struct file_lock *fl, *victim = NULL;
1781 struct inode *inode = locks_inode(filp);
1782 struct file_lock_context *ctx;
1783 LIST_HEAD(dispose);
1784
1785 ctx = smp_load_acquire(&inode->i_flctx);
1786 if (!ctx) {
1787 trace_generic_delete_lease(inode, NULL);
1788 return error;
1789 }
1790
1791 percpu_down_read_preempt_disable(&file_rwsem);
1792 spin_lock(&ctx->flc_lock);
1793 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1794 if (fl->fl_file == filp &&
1795 fl->fl_owner == owner) {
1796 victim = fl;
1797 break;
1798 }
1799 }
1800 trace_generic_delete_lease(inode, victim);
1801 if (victim)
1802 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1803 spin_unlock(&ctx->flc_lock);
1804 percpu_up_read_preempt_enable(&file_rwsem);
1805 locks_dispose_list(&dispose);
1806 return error;
1807}
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1821 void **priv)
1822{
1823 struct inode *inode = locks_inode(filp);
1824 int error;
1825
1826 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1827 return -EACCES;
1828 if (!S_ISREG(inode->i_mode))
1829 return -EINVAL;
1830 error = security_file_lock(filp, arg);
1831 if (error)
1832 return error;
1833
1834 switch (arg) {
1835 case F_UNLCK:
1836 return generic_delete_lease(filp, *priv);
1837 case F_RDLCK:
1838 case F_WRLCK:
1839 if (!(*flp)->fl_lmops->lm_break) {
1840 WARN_ON_ONCE(1);
1841 return -ENOLCK;
1842 }
1843
1844 return generic_add_lease(filp, arg, flp, priv);
1845 default:
1846 return -EINVAL;
1847 }
1848}
1849EXPORT_SYMBOL(generic_setlease);
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868int
1869vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1870{
1871 if (filp->f_op->setlease && is_remote_lock(filp))
1872 return filp->f_op->setlease(filp, arg, lease, priv);
1873 else
1874 return generic_setlease(filp, arg, lease, priv);
1875}
1876EXPORT_SYMBOL_GPL(vfs_setlease);
1877
/*
 * do_fcntl_add_lease - install a lease of type @arg on @filp, plus the
 * fasync entry that will deliver SIGIO to @fd on lease break.
 *
 * Ownership: vfs_setlease() sets the fl / new pointers to NULL when it
 * consumes them; whatever is still non-NULL afterwards is freed here.
 */
static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl;
	struct fasync_struct *new;
	int error;

	fl = lease_alloc(filp, arg);
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	new = fasync_alloc();
	if (!new) {
		locks_free_lock(fl);
		return -ENOMEM;
	}
	new->fa_fd = fd;

	error = vfs_setlease(filp, arg, &fl, (void **)&new);
	/* free whatever vfs_setlease() did not consume */
	if (fl)
		locks_free_lock(fl);
	if (new)
		fasync_free(new);
	return error;
}
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1914{
1915 if (arg == F_UNLCK)
1916 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1917 return do_fcntl_add_lease(fd, filp, arg);
1918}
1919
1920
1921
1922
1923
1924
1925
1926
/**
 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a FLOCK style lock request to an inode, sleeping (interruptibly)
 * until the lock can be granted or the wait is interrupted by a signal.
 */
static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
{
	int error;
	might_sleep();
	for (;;) {
		error = flock_lock_inode(inode, fl);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the block is granted */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;
		/* interrupted by a signal: leave the blocked list */
		locks_delete_block(fl);
		break;
	}
	return error;
}
1944
1945
1946
1947
1948
1949
1950
1951
1952int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1953{
1954 int res = 0;
1955 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1956 case FL_POSIX:
1957 res = posix_lock_inode_wait(inode, fl);
1958 break;
1959 case FL_FLOCK:
1960 res = flock_lock_inode_wait(inode, fl);
1961 break;
1962 default:
1963 BUG();
1964 }
1965 return res;
1966}
1967EXPORT_SYMBOL(locks_lock_inode_wait);
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: one of %LOCK_SH, %LOCK_EX, %LOCK_UN, optionally ORed with %LOCK_NB.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.  Filesystems
 * that implement ->flock() (typically network filesystems) get the request
 * passed through; everything else uses the generic implementation.
 */
SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
{
	struct fd f = fdget(fd);
	struct file_lock *lock;
	int can_sleep, unlock;
	int error;

	error = -EBADF;
	if (!f.file)
		goto out;

	can_sleep = !(cmd & LOCK_NB);
	cmd &= ~LOCK_NB;
	unlock = (cmd == LOCK_UN);

	/*
	 * Unless unlocking (or a LOCK_MAND request), the descriptor must be
	 * open for read and/or write; error stays -EBADF on this path.
	 */
	if (!unlock && !(cmd & LOCK_MAND) &&
	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
		goto out_putf;

	lock = flock_make_lock(f.file, cmd);
	if (IS_ERR(lock)) {
		error = PTR_ERR(lock);
		goto out_putf;
	}

	if (can_sleep)
		lock->fl_flags |= FL_SLEEP;

	error = security_file_lock(f.file, lock->fl_type);
	if (error)
		goto out_free;

	if (f.file->f_op->flock && is_remote_lock(f.file))
		error = f.file->f_op->flock(f.file,
					  (can_sleep) ? F_SETLKW : F_SETLK,
					  lock);
	else
		error = locks_lock_file_wait(f.file, lock);

 out_free:
	locks_free_lock(lock);

 out_putf:
	fdput(f);
 out:
	return error;
}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044int vfs_test_lock(struct file *filp, struct file_lock *fl)
2045{
2046 if (filp->f_op->lock && is_remote_lock(filp))
2047 return filp->f_op->lock(filp, F_GETLK, fl);
2048 posix_test_lock(filp, fl);
2049 return 0;
2050}
2051EXPORT_SYMBOL_GPL(vfs_test_lock);
2052
/*
 * posix_lock_to_flock - translate an internal file_lock into the userspace
 * struct flock reported by F_GETLK.
 *
 * OFD locks report l_pid == -1 since they are not owned by a process.
 * Returns -EOVERFLOW on 32-bit kernels when the range does not fit the
 * 32-bit off_t of the legacy struct flock.
 */
static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
#if BITS_PER_LONG == 32
	/*
	 * Make sure we can represent the posix lock via
	 * legacy 32bit flock.
	 */
	if (fl->fl_start > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
	if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
		return -EOVERFLOW;
#endif
	flock->l_start = fl->fl_start;
	/* l_len == 0 means "to end of file" */
	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
		fl->fl_end - fl->fl_start + 1;
	flock->l_whence = 0;
	flock->l_type = fl->fl_type;
	return 0;
}
2073
2074#if BITS_PER_LONG == 32
2075static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2076{
2077 flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
2078 flock->l_start = fl->fl_start;
2079 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2080 fl->fl_end - fl->fl_start + 1;
2081 flock->l_whence = 0;
2082 flock->l_type = fl->fl_type;
2083}
2084#endif
2085
2086
2087
2088
/* Report the first lock that would block the lock described by @l on @filp.
 * This implements the F_GETLK and F_OFD_GETLK commands of fcntl().
 */
int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
{
	struct file_lock file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid to be zero on input */
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK;
		file_lock.fl_flags |= FL_OFDLCK;
		/* OFD locks are owned by the open file description */
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK) {
		/* may fail with -EOVERFLOW on 32-bit kernels */
		error = posix_lock_to_flock(&flock, &file_lock);
		if (error)
			goto rel_priv;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
rel_priv:
	/* vfs_test_lock() may have attached fs-private state */
	locks_release_private(&file_lock);
out:
	return error;
}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2169{
2170 if (filp->f_op->lock && is_remote_lock(filp))
2171 return filp->f_op->lock(filp, cmd, fl);
2172 else
2173 return posix_lock_file(filp, fl, conf);
2174}
2175EXPORT_SYMBOL_GPL(vfs_lock_file);
2176
/*
 * Apply @fl to @filp via vfs_lock_file(), sleeping (interruptibly) until a
 * deferred request is granted or a signal interrupts the wait.
 */
static int do_lock_file_wait(struct file *filp, unsigned int cmd,
			struct file_lock *fl)
{
	int error;

	error = security_file_lock(filp, fl->fl_type);
	if (error)
		return error;

	for (;;) {
		error = vfs_lock_file(filp, cmd, fl, NULL);
		if (error != FILE_LOCK_DEFERRED)
			break;
		/* fl_next is cleared when the block is granted */
		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
		if (!error)
			continue;
		/* interrupted by a signal: leave the blocked list */
		locks_delete_block(fl);
		break;
	}

	return error;
}
2200
2201
2202static int
2203check_fmode_for_setlk(struct file_lock *fl)
2204{
2205 switch (fl->fl_type) {
2206 case F_RDLCK:
2207 if (!(fl->fl_file->f_mode & FMODE_READ))
2208 return -EBADF;
2209 break;
2210 case F_WRLCK:
2211 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2212 return -EBADF;
2213 }
2214 return 0;
2215}
2216
2217
2218
2219
2220int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2221 struct flock __user *l)
2222{
2223 struct file_lock *file_lock = locks_alloc_lock();
2224 struct flock flock;
2225 struct inode *inode;
2226 struct file *f;
2227 int error;
2228
2229 if (file_lock == NULL)
2230 return -ENOLCK;
2231
2232 inode = locks_inode(filp);
2233
2234
2235
2236
2237 error = -EFAULT;
2238 if (copy_from_user(&flock, l, sizeof(flock)))
2239 goto out;
2240
2241
2242
2243
2244 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2245 error = -EAGAIN;
2246 goto out;
2247 }
2248
2249 error = flock_to_posix_lock(filp, file_lock, &flock);
2250 if (error)
2251 goto out;
2252
2253 error = check_fmode_for_setlk(file_lock);
2254 if (error)
2255 goto out;
2256
2257
2258
2259
2260
2261 switch (cmd) {
2262 case F_OFD_SETLK:
2263 error = -EINVAL;
2264 if (flock.l_pid != 0)
2265 goto out;
2266
2267 cmd = F_SETLK;
2268 file_lock->fl_flags |= FL_OFDLCK;
2269 file_lock->fl_owner = filp;
2270 break;
2271 case F_OFD_SETLKW:
2272 error = -EINVAL;
2273 if (flock.l_pid != 0)
2274 goto out;
2275
2276 cmd = F_SETLKW;
2277 file_lock->fl_flags |= FL_OFDLCK;
2278 file_lock->fl_owner = filp;
2279
2280 case F_SETLKW:
2281 file_lock->fl_flags |= FL_SLEEP;
2282 }
2283
2284 error = do_lock_file_wait(filp, cmd, file_lock);
2285
2286
2287
2288
2289
2290
2291 if (!error && file_lock->fl_type != F_UNLCK &&
2292 !(file_lock->fl_flags & FL_OFDLCK)) {
2293
2294
2295
2296
2297
2298 spin_lock(¤t->files->file_lock);
2299 f = fcheck(fd);
2300 spin_unlock(¤t->files->file_lock);
2301 if (f != filp) {
2302 file_lock->fl_type = F_UNLCK;
2303 error = do_lock_file_wait(filp, cmd, file_lock);
2304 WARN_ON_ONCE(error);
2305 error = -EBADF;
2306 }
2307 }
2308out:
2309 trace_fcntl_setlk(inode, file_lock, error);
2310 locks_free_lock(file_lock);
2311 return error;
2312}
2313
2314#if BITS_PER_LONG == 32
2315
2316
2317
/* Report the first lock that would block the lock described by @l on @filp.
 * This implements the F_GETLK64 and F_OFD_GETLK commands of fcntl() for
 * 32-bit kernels using the 64-bit struct flock64.
 */
int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
{
	struct file_lock file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (cmd == F_OFD_GETLK) {
		/* OFD locks require l_pid to be zero on input */
		error = -EINVAL;
		if (flock.l_pid != 0)
			goto out;

		cmd = F_GETLK64;
		file_lock.fl_flags |= FL_OFDLCK;
		/* OFD locks are owned by the open file description */
		file_lock.fl_owner = filp;
	}

	error = vfs_test_lock(filp, &file_lock);
	if (error)
		goto out;

	flock.l_type = file_lock.fl_type;
	if (file_lock.fl_type != F_UNLCK)
		posix_lock_to_flock64(&flock, &file_lock);

	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

	/* vfs_test_lock() may have attached fs-private state */
	locks_release_private(&file_lock);
out:
	return error;
}
2361
2362
2363
2364
2365int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2366 struct flock64 __user *l)
2367{
2368 struct file_lock *file_lock = locks_alloc_lock();
2369 struct flock64 flock;
2370 struct inode *inode;
2371 struct file *f;
2372 int error;
2373
2374 if (file_lock == NULL)
2375 return -ENOLCK;
2376
2377
2378
2379
2380 error = -EFAULT;
2381 if (copy_from_user(&flock, l, sizeof(flock)))
2382 goto out;
2383
2384 inode = locks_inode(filp);
2385
2386
2387
2388
2389 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2390 error = -EAGAIN;
2391 goto out;
2392 }
2393
2394 error = flock64_to_posix_lock(filp, file_lock, &flock);
2395 if (error)
2396 goto out;
2397
2398 error = check_fmode_for_setlk(file_lock);
2399 if (error)
2400 goto out;
2401
2402
2403
2404
2405
2406 switch (cmd) {
2407 case F_OFD_SETLK:
2408 error = -EINVAL;
2409 if (flock.l_pid != 0)
2410 goto out;
2411
2412 cmd = F_SETLK64;
2413 file_lock->fl_flags |= FL_OFDLCK;
2414 file_lock->fl_owner = filp;
2415 break;
2416 case F_OFD_SETLKW:
2417 error = -EINVAL;
2418 if (flock.l_pid != 0)
2419 goto out;
2420
2421 cmd = F_SETLKW64;
2422 file_lock->fl_flags |= FL_OFDLCK;
2423 file_lock->fl_owner = filp;
2424
2425 case F_SETLKW64:
2426 file_lock->fl_flags |= FL_SLEEP;
2427 }
2428
2429 error = do_lock_file_wait(filp, cmd, file_lock);
2430
2431
2432
2433
2434
2435
2436 if (!error && file_lock->fl_type != F_UNLCK &&
2437 !(file_lock->fl_flags & FL_OFDLCK)) {
2438
2439
2440
2441
2442
2443 spin_lock(¤t->files->file_lock);
2444 f = fcheck(fd);
2445 spin_unlock(¤t->files->file_lock);
2446 if (f != filp) {
2447 file_lock->fl_type = F_UNLCK;
2448 error = do_lock_file_wait(filp, cmd, file_lock);
2449 WARN_ON_ONCE(error);
2450 error = -EBADF;
2451 }
2452 }
2453out:
2454 locks_free_lock(file_lock);
2455 return error;
2456}
2457#endif
2458
2459
2460
2461
2462
2463
/*
 * This function is called when the file is being removed or closed:
 * release any POSIX locks held by @owner on @filp.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	int error;
	struct inode *inode = locks_inode(filp);
	struct file_lock lock;
	struct file_lock_context *ctx;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file().  Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx || list_empty(&ctx->flc_posix))
		return;

	/* build a whole-file unlock request on the stack */
	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX | FL_CLOSE;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;
	lock.fl_ops = NULL;
	lock.fl_lmops = NULL;

	error = vfs_lock_file(filp, F_SETLK, &lock, NULL);

	/* the filesystem may have attached private state to the request */
	if (lock.fl_ops && lock.fl_ops->fl_release_private)
		lock.fl_ops->fl_release_private(&lock);
	trace_locks_remove_posix(inode, &lock, error);
}
2496
2497EXPORT_SYMBOL(locks_remove_posix);
2498
2499
/*
 * Release all flock locks held by @filp on close.
 * The caller must guarantee @flctx is valid (see locks_remove_file()).
 */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
	/* whole-file FLOCK unlock request built on the stack */
	struct file_lock fl = {
		.fl_owner = filp,
		.fl_pid = current->tgid,
		.fl_file = filp,
		.fl_flags = FL_FLOCK,
		.fl_type = F_UNLCK,
		.fl_end = OFFSET_MAX,
	};
	struct inode *inode = locks_inode(filp);

	if (list_empty(&flctx->flc_flock))
		return;

	if (filp->f_op->flock && is_remote_lock(filp))
		filp->f_op->flock(filp, F_SETLKW, &fl);
	else
		flock_lock_inode(inode, &fl);

	/* the filesystem may have attached private state to the request */
	if (fl.fl_ops && fl.fl_ops->fl_release_private)
		fl.fl_ops->fl_release_private(&fl);
}
2524
2525
/*
 * Release all leases held by @filp on close.
 * The caller must guarantee @ctx is valid (see locks_remove_file()).
 */
static void
locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
{
	struct file_lock *fl, *tmp;
	LIST_HEAD(dispose);

	if (list_empty(&ctx->flc_lease))
		return;

	/* lock ordering: file_rwsem (read) nests outside flc_lock */
	percpu_down_read_preempt_disable(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
		if (filp == fl->fl_file)
			lease_modify(fl, F_UNLCK, &dispose);
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);

	locks_dispose_list(&dispose);
}
2545
2546
2547
2548
/*
 * This function is called on the last close of an open file:
 * drop all locks associated with @filp.
 */
void locks_remove_file(struct file *filp)
{
	struct file_lock_context *ctx;

	ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
	if (!ctx)
		return;

	/* remove any OFD locks (owned by the file description itself) */
	locks_remove_posix(filp, filp);

	/* remove flock locks */
	locks_remove_flock(filp, ctx);

	/* remove any leases */
	locks_remove_lease(filp, ctx);
}
2566
2567
2568
2569
2570
2571
2572
/**
 * posix_unblock_lock - stop waiting for a file lock
 * @waiter: the lock which was waiting
 *
 * Remove @waiter from the global blocked list.  Returns 0 if the waiter
 * was found and removed, -ENOENT if it was not blocked (any more).
 */
int
posix_unblock_lock(struct file_lock *waiter)
{
	int status = 0;

	spin_lock(&blocked_lock_lock);
	/* fl_next is non-NULL only while the lock sits on a blocked list */
	if (waiter->fl_next)
		__locks_delete_block(waiter);
	else
		status = -ENOENT;
	spin_unlock(&blocked_lock_lock);
	return status;
}
2586EXPORT_SYMBOL(posix_unblock_lock);
2587
2588
2589
2590
2591
2592
2593
2594
2595int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2596{
2597 if (filp->f_op->lock && is_remote_lock(filp))
2598 return filp->f_op->lock(filp, F_CANCELLK, fl);
2599 return 0;
2600}
2601
2602EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2603
2604#ifdef CONFIG_PROC_FS
2605#include <linux/proc_fs.h>
2606#include <linux/seq_file.h>
2607
/* Per-reader iterator state for /proc/locks: per-cpu cursor + output pos */
struct locks_iterator {
	int li_cpu;	/* cpu whose file_lock_list we are walking */
	loff_t li_pos;	/* 1-based position used as the output id */
};
2612
/*
 * Emit one /proc/locks line describing @fl, prefixed with "@id:@pfx".
 * The exact output format is userspace ABI - do not change it.
 */
static void lock_get_status(struct seq_file *f, struct file_lock *fl,
			loff_t id, char *pfx)
{
	struct inode *inode = NULL;
	unsigned int fl_pid;

	if (fl->fl_nspid) {
		struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

		/* report the pid as seen from the reader's pid namespace */
		fl_pid = pid_nr_ns(fl->fl_nspid, proc_pidns);

		/*
		 * A zero fl_pid means the owner is not visible in the
		 * reader's pid namespace: skip the entry entirely.
		 */
		if (fl_pid == 0)
			return;
	} else
		fl_pid = fl->fl_pid;

	if (fl->fl_file != NULL)
		inode = locks_inode(fl->fl_file);

	seq_printf(f, "%lld:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		if (fl->fl_flags & FL_ACCESS)
			seq_puts(f, "ACCESS");
		else if (IS_OFDLCK(fl))
			seq_puts(f, "OFDLCK");
		else
			seq_puts(f, "POSIX ");

		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" :
			     mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			seq_puts(f, "FLOCK  MSNFS     ");
		} else {
			seq_puts(f, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		if (fl->fl_flags & FL_DELEG)
			seq_puts(f, "DELEG  ");
		else
			seq_puts(f, "LEASE  ");

		if (lease_breaking(fl))
			seq_puts(f, "BREAKING  ");
		else if (fl->fl_file)
			seq_puts(f, "ACTIVE    ");
		else
			seq_puts(f, "BREAKER   ");
	} else {
		seq_puts(f, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		seq_printf(f, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		seq_printf(f, "%s ",
			       (lease_breaking(fl))
			       ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
		/* userspace relies on this representation of dev_t */
		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
	} else {
		seq_printf(f, "%d <none>:0 ", fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			seq_printf(f, "%Ld EOF\n", fl->fl_start);
		else
			seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
	} else {
		seq_puts(f, "0 EOF\n");
	}
}
2699
/*
 * seq_file ->show() for /proc/locks: print one active lock plus the
 * waiters blocked on it.
 */
static int locks_show(struct seq_file *f, void *v)
{
	struct locks_iterator *iter = f->private;
	struct file_lock *fl, *bfl;
	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;

	fl = hlist_entry(v, struct file_lock, fl_link);

	/* skip locks whose owner is invisible in the reader's pid ns */
	if (fl->fl_nspid && !pid_nr_ns(fl->fl_nspid, proc_pidns))
		return 0;

	lock_get_status(f, fl, iter->li_pos, "");

	/* show blocked waiters indented with " ->" */
	list_for_each_entry(bfl, &fl->fl_block, fl_block)
		lock_get_status(f, bfl, iter->li_pos, " ->");

	return 0;
}
2718
2719static void __show_fd_locks(struct seq_file *f,
2720 struct list_head *head, int *id,
2721 struct file *filp, struct files_struct *files)
2722{
2723 struct file_lock *fl;
2724
2725 list_for_each_entry(fl, head, fl_list) {
2726
2727 if (filp != fl->fl_file)
2728 continue;
2729 if (fl->fl_owner != files &&
2730 fl->fl_owner != filp)
2731 continue;
2732
2733 (*id)++;
2734 seq_puts(f, "lock:\t");
2735 lock_get_status(f, fl, *id, "");
2736 }
2737}
2738
/*
 * Print all locks (flock, POSIX, lease) associated with @filp/@files,
 * for /proc/<pid>/fdinfo output.
 */
void show_fd_locks(struct seq_file *f,
		  struct file *filp, struct files_struct *files)
{
	struct inode *inode = locks_inode(filp);
	struct file_lock_context *ctx;
	int id = 0;

	ctx = smp_load_acquire(&inode->i_flctx);
	if (!ctx)
		return;

	spin_lock(&ctx->flc_lock);
	__show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
	__show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
	spin_unlock(&ctx->flc_lock);
}
2756
/*
 * seq_file ->start() for /proc/locks.  Takes file_rwsem for write to
 * freeze the per-cpu file_lock_list for the whole traversal, then
 * blocked_lock_lock to stabilize the blocked lists.
 * NOTE(review): the __acquires annotation mentions only blocked_lock_lock,
 * not file_rwsem - presumably a sparse-annotation limitation; confirm.
 */
static void *locks_start(struct seq_file *f, loff_t *pos)
	__acquires(&blocked_lock_lock)
{
	struct locks_iterator *iter = f->private;

	/* li_pos is 1-based for output purposes */
	iter->li_pos = *pos + 1;
	percpu_down_write(&file_rwsem);
	spin_lock(&blocked_lock_lock);
	return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
}
2767
/* seq_file ->next() for /proc/locks: advance the per-cpu hlist cursor */
static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
{
	struct locks_iterator *iter = f->private;

	++iter->li_pos;
	return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
}
2775
/* seq_file ->stop() for /proc/locks: drop locks in reverse order */
static void locks_stop(struct seq_file *f, void *v)
	__releases(&blocked_lock_lock)
{
	spin_unlock(&blocked_lock_lock);
	percpu_up_write(&file_rwsem);
}
2782
/* seq_file hooks backing /proc/locks */
static const struct seq_operations locks_seq_operations = {
	.start = locks_start,
	.next = locks_next,
	.stop = locks_stop,
	.show = locks_show,
};
2789
/* open handler for /proc/locks: per-reader iterator lives in seq private */
static int locks_open(struct inode *inode, struct file *filp)
{
	return seq_open_private(filp, &locks_seq_operations,
					sizeof(struct locks_iterator));
}
2795
/* file_operations for /proc/locks */
static const struct file_operations proc_locks_operations = {
	.open = locks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2802
/* register /proc/locks; runs at fs initcall time */
static int __init proc_locks_init(void)
{
	proc_create("locks", 0, NULL, &proc_locks_operations);
	return 0;
}
fs_initcall(proc_locks_init);
2809#endif
2810
/*
 * Boot-time initialization of the file locking subsystem: slab caches for
 * lock contexts and locks, plus the per-cpu active-lock lists.
 */
static int __init filelock_init(void)
{
	int i;

	/* SLAB_PANIC: boot cannot proceed without these caches */
	flctx_cache = kmem_cache_create("file_lock_ctx",
			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);

	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);

	/* initialize each cpu's active-lock list and its spinlock */
	for_each_possible_cpu(i) {
		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);

		spin_lock_init(&fll->lock);
		INIT_HLIST_HEAD(&fll->hlist);
	}

	return 0;
}
2833