/*
 *  fs/locks.c
 *
 *  POSIX record locks (fcntl), BSD flock() locks and lease/delegation
 *  support for the VFS.
 */

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

#include <linux/uaccess.h>

#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
#define IS_REMOTELCK(fl)	(fl->fl_pid <= 0)

static bool lease_breaking(struct file_lock *fl)
{
	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
}

static int target_leasetype(struct file_lock *fl)
{
	if (fl->fl_flags & FL_UNLOCK_PENDING)
		return F_UNLCK;
	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
		return F_RDLCK;
	return fl->fl_type;
}

int leases_enable = 1;
int lease_break_time = 45;
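
/*
 * Both of the above are run-time tunables; they are exposed as the
 * fs.leases-enable and fs.lease-break-time sysctls (i.e.
 * /proc/sys/fs/leases-enable and /proc/sys/fs/lease-break-time), so for
 * example "sysctl -w fs.lease-break-time=10" shortens the grace period a
 * lease holder gets before its lease is forcibly broken. The sysctl wiring
 * itself lives elsewhere (kernel/sysctl.c), not in this file.
 */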

/*
 * The global file_lock_list is only used for displaying /proc/locks, so we
 * keep a list on each CPU, with each list protected by its own spinlock.
 * Global serialization is done using file_rwsem.
 *
 * Note that alterations to the list also require that the relevant flc_lock
 * is held.
 */
struct file_lock_list_struct {
	spinlock_t		lock;
	struct hlist_head	hlist;
};
static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);

/*
 * The blocked_hash is used to find POSIX lock loops for deadlock detection.
 * It is protected by blocked_lock_lock.
 *
 * We hash locks by lockowner in order to optimize searching for the lock a
 * particular lockowner is waiting on.
 */
#define BLOCKED_HASH_BITS	7
static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);

/*
 * This lock protects the blocked_hash. Generally, if you're accessing it, you
 * want to be holding this lock.
 *
 * In addition, it also protects the fl->fl_block list, and the fl->fl_next
 * pointer for file_lock structures that are acting as lock requests (in
 * contrast to those that are acting as records of acquired locks).
 *
 * Note that when we acquire this lock in order to change the above fields, we
 * often hold the flc_lock as well. In certain cases, when reading the fields
 * protected by this lock, we can skip acquiring it iff we already hold the
 * flc_lock.
 *
 * In particular, adding an entry to the fl_block list requires that you hold
 * both the flc_lock and the blocked_lock_lock (acquired in that order).
 * Deleting an entry from the list however only requires the blocked_lock_lock.
 */
static DEFINE_SPINLOCK(blocked_lock_lock);

static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;

static struct file_lock_context *
locks_get_lock_context(struct inode *inode, int type)
{
	struct file_lock_context *ctx;

	/* paired with cmpxchg() below */
	ctx = smp_load_acquire(&inode->i_flctx);
	if (likely(ctx) || type == F_UNLCK)
		goto out;

	ctx = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
	if (!ctx)
		goto out;

	spin_lock_init(&ctx->flc_lock);
	INIT_LIST_HEAD(&ctx->flc_flock);
	INIT_LIST_HEAD(&ctx->flc_posix);
	INIT_LIST_HEAD(&ctx->flc_lease);

	/*
	 * Assign the pointer if it's still NULL; if another task raced with
	 * us and installed a context first, free ours and use theirs.
	 */
	if (cmpxchg(&inode->i_flctx, NULL, ctx)) {
		kmem_cache_free(flctx_cache, ctx);
		ctx = smp_load_acquire(&inode->i_flctx);
	}
out:
	trace_locks_get_lock_context(inode, type, ctx);
	return ctx;
}
241
242static void
243locks_dump_ctx_list(struct list_head *list, char *list_type)
244{
245 struct file_lock *fl;
246
247 list_for_each_entry(fl, list, fl_list) {
248 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
249 }
250}
251
252static void
253locks_check_ctx_lists(struct inode *inode)
254{
255 struct file_lock_context *ctx = inode->i_flctx;
256
257 if (unlikely(!list_empty(&ctx->flc_flock) ||
258 !list_empty(&ctx->flc_posix) ||
259 !list_empty(&ctx->flc_lease))) {
260 pr_warn("Leaked locks on dev=0x%x:0x%x ino=0x%lx:\n",
261 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
262 inode->i_ino);
263 locks_dump_ctx_list(&ctx->flc_flock, "FLOCK");
264 locks_dump_ctx_list(&ctx->flc_posix, "POSIX");
265 locks_dump_ctx_list(&ctx->flc_lease, "LEASE");
266 }
267}
268
269static void
270locks_check_ctx_file_list(struct file *filp, struct list_head *list,
271 char *list_type)
272{
273 struct file_lock *fl;
274 struct inode *inode = locks_inode(filp);
275
276 list_for_each_entry(fl, list, fl_list)
277 if (fl->fl_file == filp)
278 pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx "
279 " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n",
280 list_type, MAJOR(inode->i_sb->s_dev),
281 MINOR(inode->i_sb->s_dev), inode->i_ino,
282 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
283}
284
285void
286locks_free_lock_context(struct inode *inode)
287{
288 struct file_lock_context *ctx = inode->i_flctx;
289
290 if (unlikely(ctx)) {
291 locks_check_ctx_lists(inode);
292 kmem_cache_free(flctx_cache, ctx);
293 }
294}
295
296static void locks_init_lock_heads(struct file_lock *fl)
297{
298 INIT_HLIST_NODE(&fl->fl_link);
299 INIT_LIST_HEAD(&fl->fl_list);
300 INIT_LIST_HEAD(&fl->fl_block);
301 init_waitqueue_head(&fl->fl_wait);
302}
303
304
305struct file_lock *locks_alloc_lock(void)
306{
307 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
308
309 if (fl)
310 locks_init_lock_heads(fl);
311
312 return fl;
313}
314EXPORT_SYMBOL_GPL(locks_alloc_lock);
315
316void locks_release_private(struct file_lock *fl)
317{
318 if (fl->fl_ops) {
319 if (fl->fl_ops->fl_release_private)
320 fl->fl_ops->fl_release_private(fl);
321 fl->fl_ops = NULL;
322 }
323
324 if (fl->fl_lmops) {
325 if (fl->fl_lmops->lm_put_owner) {
326 fl->fl_lmops->lm_put_owner(fl->fl_owner);
327 fl->fl_owner = NULL;
328 }
329 fl->fl_lmops = NULL;
330 }
331}
332EXPORT_SYMBOL_GPL(locks_release_private);
333
334
335void locks_free_lock(struct file_lock *fl)
336{
337 BUG_ON(waitqueue_active(&fl->fl_wait));
338 BUG_ON(!list_empty(&fl->fl_list));
339 BUG_ON(!list_empty(&fl->fl_block));
340 BUG_ON(!hlist_unhashed(&fl->fl_link));
341
342 locks_release_private(fl);
343 kmem_cache_free(filelock_cache, fl);
344}
345EXPORT_SYMBOL(locks_free_lock);
346
347static void
348locks_dispose_list(struct list_head *dispose)
349{
350 struct file_lock *fl;
351
352 while (!list_empty(dispose)) {
353 fl = list_first_entry(dispose, struct file_lock, fl_list);
354 list_del_init(&fl->fl_list);
355 locks_free_lock(fl);
356 }
357}
358
359void locks_init_lock(struct file_lock *fl)
360{
361 memset(fl, 0, sizeof(struct file_lock));
362 locks_init_lock_heads(fl);
363}
364
365EXPORT_SYMBOL(locks_init_lock);
366
367
368
369
370void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
371{
372 new->fl_owner = fl->fl_owner;
373 new->fl_pid = fl->fl_pid;
374 new->fl_file = NULL;
375 new->fl_flags = fl->fl_flags;
376 new->fl_type = fl->fl_type;
377 new->fl_start = fl->fl_start;
378 new->fl_end = fl->fl_end;
379 new->fl_lmops = fl->fl_lmops;
380 new->fl_ops = NULL;
381
382 if (fl->fl_lmops) {
383 if (fl->fl_lmops->lm_get_owner)
384 fl->fl_lmops->lm_get_owner(fl->fl_owner);
385 }
386}
387EXPORT_SYMBOL(locks_copy_conflock);
388
389void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
390{
391
392 WARN_ON_ONCE(new->fl_ops);
393
394 locks_copy_conflock(new, fl);
395
396 new->fl_file = fl->fl_file;
397 new->fl_ops = fl->fl_ops;
398
399 if (fl->fl_ops) {
400 if (fl->fl_ops->fl_copy_lock)
401 fl->fl_ops->fl_copy_lock(new, fl);
402 }
403}
404
405EXPORT_SYMBOL(locks_copy_lock);
406
407static inline int flock_translate_cmd(int cmd) {
408 if (cmd & LOCK_MAND)
409 return cmd & (LOCK_MAND | LOCK_RW);
410 switch (cmd) {
411 case LOCK_SH:
412 return F_RDLCK;
413 case LOCK_EX:
414 return F_WRLCK;
415 case LOCK_UN:
416 return F_UNLCK;
417 }
418 return -EINVAL;
419}
420
421
422static struct file_lock *
423flock_make_lock(struct file *filp, unsigned int cmd)
424{
425 struct file_lock *fl;
426 int type = flock_translate_cmd(cmd);
427
428 if (type < 0)
429 return ERR_PTR(type);
430
431 fl = locks_alloc_lock();
432 if (fl == NULL)
433 return ERR_PTR(-ENOMEM);
434
435 fl->fl_file = filp;
436 fl->fl_owner = filp;
437 fl->fl_pid = current->tgid;
438 fl->fl_flags = FL_FLOCK;
439 fl->fl_type = type;
440 fl->fl_end = OFFSET_MAX;
441
442 return fl;
443}
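
/*
 * Note that fl_owner above is the struct file itself: flock() locks belong to
 * the open file description, so they are shared by dup()ed and inherited
 * descriptors that refer to the same struct file, and are dropped when the
 * last such descriptor is closed (or on an explicit LOCK_UN).
 */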
444
445static int assign_type(struct file_lock *fl, long type)
446{
447 switch (type) {
448 case F_RDLCK:
449 case F_WRLCK:
450 case F_UNLCK:
451 fl->fl_type = type;
452 break;
453 default:
454 return -EINVAL;
455 }
456 return 0;
457}
458
459static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
460 struct flock64 *l)
461{
462 switch (l->l_whence) {
463 case SEEK_SET:
464 fl->fl_start = 0;
465 break;
466 case SEEK_CUR:
467 fl->fl_start = filp->f_pos;
468 break;
469 case SEEK_END:
470 fl->fl_start = i_size_read(file_inode(filp));
471 break;
472 default:
473 return -EINVAL;
474 }
475 if (l->l_start > OFFSET_MAX - fl->fl_start)
476 return -EOVERFLOW;
477 fl->fl_start += l->l_start;
478 if (fl->fl_start < 0)
479 return -EINVAL;
480
481
482
483 if (l->l_len > 0) {
484 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
485 return -EOVERFLOW;
486 fl->fl_end = fl->fl_start + l->l_len - 1;
487
488 } else if (l->l_len < 0) {
489 if (fl->fl_start + l->l_len < 0)
490 return -EINVAL;
491 fl->fl_end = fl->fl_start - 1;
492 fl->fl_start += l->l_len;
493 } else
494 fl->fl_end = OFFSET_MAX;
495
496 fl->fl_owner = current->files;
497 fl->fl_pid = current->tgid;
498 fl->fl_file = filp;
499 fl->fl_flags = FL_POSIX;
500 fl->fl_ops = NULL;
501 fl->fl_lmops = NULL;
502
503 return assign_type(fl, l->l_type);
504}
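
/*
 * Example (illustrative only): with l_whence = SEEK_SET, l_start = 100 and
 * l_len = -10, the code above yields fl_start = 90 and fl_end = 99, i.e. a
 * negative l_len locks the |l_len| bytes *preceding* l_start, matching the
 * POSIX definition. l_len = 0 means "lock to end of file" (fl_end is set to
 * OFFSET_MAX).
 */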

/*
 * As flock64_to_posix_lock() above, but starting from the native
 * "struct flock".
 */
509static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
510 struct flock *l)
511{
512 struct flock64 ll = {
513 .l_type = l->l_type,
514 .l_whence = l->l_whence,
515 .l_start = l->l_start,
516 .l_len = l->l_len,
517 };
518
519 return flock64_to_posix_lock(filp, fl, &ll);
520}
521
522
523static bool
524lease_break_callback(struct file_lock *fl)
525{
526 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
527 return false;
528}
529
530static void
531lease_setup(struct file_lock *fl, void **priv)
532{
533 struct file *filp = fl->fl_file;
534 struct fasync_struct *fa = *priv;
535
536
537
538
539
540
541 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
542 *priv = NULL;
543
544 __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
545}
546
547static const struct lock_manager_operations lease_manager_ops = {
548 .lm_break = lease_break_callback,
549 .lm_change = lease_modify,
550 .lm_setup = lease_setup,
551};
552
553
554
555
556static int lease_init(struct file *filp, long type, struct file_lock *fl)
557{
558 if (assign_type(fl, type) != 0)
559 return -EINVAL;
560
561 fl->fl_owner = filp;
562 fl->fl_pid = current->tgid;
563
564 fl->fl_file = filp;
565 fl->fl_flags = FL_LEASE;
566 fl->fl_start = 0;
567 fl->fl_end = OFFSET_MAX;
568 fl->fl_ops = NULL;
569 fl->fl_lmops = &lease_manager_ops;
570 return 0;
571}
572
573
574static struct file_lock *lease_alloc(struct file *filp, long type)
575{
576 struct file_lock *fl = locks_alloc_lock();
577 int error = -ENOMEM;
578
579 if (fl == NULL)
580 return ERR_PTR(error);
581
582 error = lease_init(filp, type, fl);
583 if (error) {
584 locks_free_lock(fl);
585 return ERR_PTR(error);
586 }
587 return fl;
588}
589
590
591
592static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
593{
594 return ((fl1->fl_end >= fl2->fl_start) &&
595 (fl2->fl_end >= fl1->fl_start));
596}
597
598
599
600
601static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
602{
603 if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
604 return fl2->fl_lmops == fl1->fl_lmops &&
605 fl1->fl_lmops->lm_compare_owner(fl1, fl2);
606 return fl1->fl_owner == fl2->fl_owner;
607}
608
609
610static void locks_insert_global_locks(struct file_lock *fl)
611{
612 struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list);
613
614 percpu_rwsem_assert_held(&file_rwsem);
615
616 spin_lock(&fll->lock);
617 fl->fl_link_cpu = smp_processor_id();
618 hlist_add_head(&fl->fl_link, &fll->hlist);
619 spin_unlock(&fll->lock);
620}
621
622
623static void locks_delete_global_locks(struct file_lock *fl)
624{
625 struct file_lock_list_struct *fll;
626
627 percpu_rwsem_assert_held(&file_rwsem);
628
629
630
631
632
633
634 if (hlist_unhashed(&fl->fl_link))
635 return;
636
637 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
638 spin_lock(&fll->lock);
639 hlist_del_init(&fl->fl_link);
640 spin_unlock(&fll->lock);
641}
642
643static unsigned long
644posix_owner_key(struct file_lock *fl)
645{
646 if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
647 return fl->fl_lmops->lm_owner_key(fl);
648 return (unsigned long)fl->fl_owner;
649}
650
651static void locks_insert_global_blocked(struct file_lock *waiter)
652{
653 lockdep_assert_held(&blocked_lock_lock);
654
655 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
656}
657
658static void locks_delete_global_blocked(struct file_lock *waiter)
659{
660 lockdep_assert_held(&blocked_lock_lock);
661
662 hash_del(&waiter->fl_link);
663}
664
665
666
667
668
669
670static void __locks_delete_block(struct file_lock *waiter)
671{
672 locks_delete_global_blocked(waiter);
673 list_del_init(&waiter->fl_block);
674 waiter->fl_next = NULL;
675}
676
677static void locks_delete_block(struct file_lock *waiter)
678{
679 spin_lock(&blocked_lock_lock);
680 __locks_delete_block(waiter);
681 spin_unlock(&blocked_lock_lock);
682}
683
684
685
686
687
688
689
690
691
692
693
694static void __locks_insert_block(struct file_lock *blocker,
695 struct file_lock *waiter)
696{
697 BUG_ON(!list_empty(&waiter->fl_block));
698 waiter->fl_next = blocker;
699 list_add_tail(&waiter->fl_block, &blocker->fl_block);
700 if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
701 locks_insert_global_blocked(waiter);
702}
703
704
705static void locks_insert_block(struct file_lock *blocker,
706 struct file_lock *waiter)
707{
708 spin_lock(&blocked_lock_lock);
709 __locks_insert_block(blocker, waiter);
710 spin_unlock(&blocked_lock_lock);
711}
712
713
714
715
716
717
718static void locks_wake_up_blocks(struct file_lock *blocker)
719{
720
721
722
723
724
725
726
727 if (list_empty(&blocker->fl_block))
728 return;
729
730 spin_lock(&blocked_lock_lock);
731 while (!list_empty(&blocker->fl_block)) {
732 struct file_lock *waiter;
733
734 waiter = list_first_entry(&blocker->fl_block,
735 struct file_lock, fl_block);
736 __locks_delete_block(waiter);
737 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
738 waiter->fl_lmops->lm_notify(waiter);
739 else
740 wake_up(&waiter->fl_wait);
741 }
742 spin_unlock(&blocked_lock_lock);
743}
744
745static void
746locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
747{
748 list_add_tail(&fl->fl_list, before);
749 locks_insert_global_locks(fl);
750}
751
752static void
753locks_unlink_lock_ctx(struct file_lock *fl)
754{
755 locks_delete_global_locks(fl);
756 list_del_init(&fl->fl_list);
757 locks_wake_up_blocks(fl);
758}
759
760static void
761locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
762{
763 locks_unlink_lock_ctx(fl);
764 if (dispose)
765 list_add(&fl->fl_list, dispose);
766 else
767 locks_free_lock(fl);
768}
769
770
771
772
773static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
774{
775 if (sys_fl->fl_type == F_WRLCK)
776 return 1;
777 if (caller_fl->fl_type == F_WRLCK)
778 return 1;
779 return 0;
780}
781
782
783
784
785static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
786{
787
788
789
790 if (posix_same_owner(caller_fl, sys_fl))
791 return (0);
792
793
794 if (!locks_overlap(caller_fl, sys_fl))
795 return 0;
796
797 return (locks_conflict(caller_fl, sys_fl));
798}
799
800
801
802
803static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
804{
805
806
807
808 if (caller_fl->fl_file == sys_fl->fl_file)
809 return (0);
810 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
811 return 0;
812
813 return (locks_conflict(caller_fl, sys_fl));
814}
815
816void
817posix_test_lock(struct file *filp, struct file_lock *fl)
818{
819 struct file_lock *cfl;
820 struct file_lock_context *ctx;
821 struct inode *inode = locks_inode(filp);
822
823 ctx = smp_load_acquire(&inode->i_flctx);
824 if (!ctx || list_empty_careful(&ctx->flc_posix)) {
825 fl->fl_type = F_UNLCK;
826 return;
827 }
828
829 spin_lock(&ctx->flc_lock);
830 list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
831 if (posix_locks_conflict(fl, cfl)) {
832 locks_copy_conflock(fl, cfl);
833 goto out;
834 }
835 }
836 fl->fl_type = F_UNLCK;
837out:
838 spin_unlock(&ctx->flc_lock);
839 return;
840}
841EXPORT_SYMBOL(posix_test_lock);
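
/*
 * Illustrative sketch of a caller (not a new API): to test whether a byte
 * range could be locked, fill in a struct file_lock and call
 * posix_test_lock():
 *
 *	struct file_lock fl;
 *
 *	locks_init_lock(&fl);
 *	fl.fl_owner = current->files;
 *	fl.fl_pid   = current->tgid;
 *	fl.fl_flags = FL_POSIX;
 *	fl.fl_type  = F_WRLCK;
 *	fl.fl_start = 0;
 *	fl.fl_end   = OFFSET_MAX;
 *	posix_test_lock(filp, &fl);
 *
 * On return, fl.fl_type is F_UNLCK if nothing conflicts; otherwise fl has
 * been overwritten with a copy of the first conflicting lock found.
 */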

/*
 * Deadlock detection:
 *
 * We only attempt to detect deadlocks that are due purely to POSIX file
 * locks. Each lock owner is assumed to be waiting for at most one lock at a
 * time, so we can walk the chain of "which lock is the owner of my blocker
 * waiting for" through the blocked_hash; if that walk comes back to the
 * requesting owner, granting the request would deadlock and the request
 * fails with -EDEADLK.
 *
 * The single-waiter assumption can be violated in practice (for example by
 * threads sharing a files_struct, or by userspace lock managers), so the
 * walk gives up after MAX_DEADLK_ITERATIONS steps rather than looping or
 * scanning forever.
 *
 * OFD ("open file description") locks are owned by the struct file rather
 * than by a process, so this detector cannot say anything meaningful about
 * them and they are skipped in posix_locks_deadlock().
 */
#define MAX_DEADLK_ITERATIONS 10

/* Find a lock that the owner of the given block_fl is blocking on. */
static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
{
	struct file_lock *fl;

	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
		if (posix_same_owner(fl, block_fl))
			return fl->fl_next;
	}
	return NULL;
}
889
890
891static int posix_locks_deadlock(struct file_lock *caller_fl,
892 struct file_lock *block_fl)
893{
894 int i = 0;
895
896 lockdep_assert_held(&blocked_lock_lock);
897
898
899
900
901
902 if (IS_OFDLCK(caller_fl))
903 return 0;
904
905 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
906 if (i++ > MAX_DEADLK_ITERATIONS)
907 return 0;
908 if (posix_same_owner(caller_fl, block_fl))
909 return 1;
910 }
911 return 0;
912}
913
914
915
916
917
918
919
920
921static int flock_lock_inode(struct inode *inode, struct file_lock *request)
922{
923 struct file_lock *new_fl = NULL;
924 struct file_lock *fl;
925 struct file_lock_context *ctx;
926 int error = 0;
927 bool found = false;
928 LIST_HEAD(dispose);
929
930 ctx = locks_get_lock_context(inode, request->fl_type);
931 if (!ctx) {
932 if (request->fl_type != F_UNLCK)
933 return -ENOMEM;
934 return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0;
935 }
936
937 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
938 new_fl = locks_alloc_lock();
939 if (!new_fl)
940 return -ENOMEM;
941 }
942
943 percpu_down_read_preempt_disable(&file_rwsem);
944 spin_lock(&ctx->flc_lock);
945 if (request->fl_flags & FL_ACCESS)
946 goto find_conflict;
947
948 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
949 if (request->fl_file != fl->fl_file)
950 continue;
951 if (request->fl_type == fl->fl_type)
952 goto out;
953 found = true;
954 locks_delete_lock_ctx(fl, &dispose);
955 break;
956 }
957
958 if (request->fl_type == F_UNLCK) {
959 if ((request->fl_flags & FL_EXISTS) && !found)
960 error = -ENOENT;
961 goto out;
962 }
963
964find_conflict:
965 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
966 if (!flock_locks_conflict(request, fl))
967 continue;
968 error = -EAGAIN;
969 if (!(request->fl_flags & FL_SLEEP))
970 goto out;
971 error = FILE_LOCK_DEFERRED;
972 locks_insert_block(fl, request);
973 goto out;
974 }
975 if (request->fl_flags & FL_ACCESS)
976 goto out;
977 locks_copy_lock(new_fl, request);
978 locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
979 new_fl = NULL;
980 error = 0;
981
982out:
983 spin_unlock(&ctx->flc_lock);
984 percpu_up_read_preempt_enable(&file_rwsem);
985 if (new_fl)
986 locks_free_lock(new_fl);
987 locks_dispose_list(&dispose);
988 return error;
989}
990
991static int posix_lock_inode(struct inode *inode, struct file_lock *request,
992 struct file_lock *conflock)
993{
994 struct file_lock *fl, *tmp;
995 struct file_lock *new_fl = NULL;
996 struct file_lock *new_fl2 = NULL;
997 struct file_lock *left = NULL;
998 struct file_lock *right = NULL;
999 struct file_lock_context *ctx;
1000 int error;
1001 bool added = false;
1002 LIST_HEAD(dispose);
1003
1004 ctx = locks_get_lock_context(inode, request->fl_type);
1005 if (!ctx)
1006 return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM;
1007
1008
1009
1010
1011
1012
1013
1014 if (!(request->fl_flags & FL_ACCESS) &&
1015 (request->fl_type != F_UNLCK ||
1016 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
1017 new_fl = locks_alloc_lock();
1018 new_fl2 = locks_alloc_lock();
1019 }
1020
1021 percpu_down_read_preempt_disable(&file_rwsem);
1022 spin_lock(&ctx->flc_lock);
1023
1024
1025
1026
1027
1028 if (request->fl_type != F_UNLCK) {
1029 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1030 if (!posix_locks_conflict(request, fl))
1031 continue;
1032 if (conflock)
1033 locks_copy_conflock(conflock, fl);
1034 error = -EAGAIN;
1035 if (!(request->fl_flags & FL_SLEEP))
1036 goto out;
1037
1038
1039
1040
1041 error = -EDEADLK;
1042 spin_lock(&blocked_lock_lock);
1043 if (likely(!posix_locks_deadlock(request, fl))) {
1044 error = FILE_LOCK_DEFERRED;
1045 __locks_insert_block(fl, request);
1046 }
1047 spin_unlock(&blocked_lock_lock);
1048 goto out;
1049 }
1050 }
1051
1052
1053 error = 0;
1054 if (request->fl_flags & FL_ACCESS)
1055 goto out;
1056
1057
1058 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1059 if (posix_same_owner(request, fl))
1060 break;
1061 }
1062
1063
1064 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1065 if (!posix_same_owner(request, fl))
1066 break;
1067
1068
1069 if (request->fl_type == fl->fl_type) {
1070
1071
1072
1073
1074 if (fl->fl_end < request->fl_start - 1)
1075 continue;
1076
1077
1078
1079 if (fl->fl_start - 1 > request->fl_end)
1080 break;
1081
1082
1083
1084
1085
1086
1087 if (fl->fl_start > request->fl_start)
1088 fl->fl_start = request->fl_start;
1089 else
1090 request->fl_start = fl->fl_start;
1091 if (fl->fl_end < request->fl_end)
1092 fl->fl_end = request->fl_end;
1093 else
1094 request->fl_end = fl->fl_end;
1095 if (added) {
1096 locks_delete_lock_ctx(fl, &dispose);
1097 continue;
1098 }
1099 request = fl;
1100 added = true;
1101 } else {
1102
1103
1104
1105 if (fl->fl_end < request->fl_start)
1106 continue;
1107 if (fl->fl_start > request->fl_end)
1108 break;
1109 if (request->fl_type == F_UNLCK)
1110 added = true;
1111 if (fl->fl_start < request->fl_start)
1112 left = fl;
1113
1114
1115
1116 if (fl->fl_end > request->fl_end) {
1117 right = fl;
1118 break;
1119 }
1120 if (fl->fl_start >= request->fl_start) {
1121
1122
1123
1124 if (added) {
1125 locks_delete_lock_ctx(fl, &dispose);
1126 continue;
1127 }
1128
1129
1130
1131
1132
1133
1134
1135 error = -ENOLCK;
1136 if (!new_fl)
1137 goto out;
1138 locks_copy_lock(new_fl, request);
1139 request = new_fl;
1140 new_fl = NULL;
1141 locks_insert_lock_ctx(request, &fl->fl_list);
1142 locks_delete_lock_ctx(fl, &dispose);
1143 added = true;
1144 }
1145 }
1146 }
1147
1148
1149
1150
1151
1152
1153 error = -ENOLCK;
1154 if (right && left == right && !new_fl2)
1155 goto out;
1156
1157 error = 0;
1158 if (!added) {
1159 if (request->fl_type == F_UNLCK) {
1160 if (request->fl_flags & FL_EXISTS)
1161 error = -ENOENT;
1162 goto out;
1163 }
1164
1165 if (!new_fl) {
1166 error = -ENOLCK;
1167 goto out;
1168 }
1169 locks_copy_lock(new_fl, request);
1170 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1171 fl = new_fl;
1172 new_fl = NULL;
1173 }
1174 if (right) {
1175 if (left == right) {
1176
1177
1178
1179 left = new_fl2;
1180 new_fl2 = NULL;
1181 locks_copy_lock(left, right);
1182 locks_insert_lock_ctx(left, &fl->fl_list);
1183 }
1184 right->fl_start = request->fl_end + 1;
1185 locks_wake_up_blocks(right);
1186 }
1187 if (left) {
1188 left->fl_end = request->fl_start - 1;
1189 locks_wake_up_blocks(left);
1190 }
1191 out:
1192 spin_unlock(&ctx->flc_lock);
1193 percpu_up_read_preempt_enable(&file_rwsem);
1194
1195
1196
1197 if (new_fl)
1198 locks_free_lock(new_fl);
1199 if (new_fl2)
1200 locks_free_lock(new_fl2);
1201 locks_dispose_list(&dispose);
1202 trace_posix_lock_inode(inode, request, error);
1203
1204 return error;
1205}

/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address.
 *
 * Note that if called with an FL_EXISTS argument, the caller may determine
 * whether or not a lock was successfully freed by testing the return
 * value for -ENOENT.
 */
1221int posix_lock_file(struct file *filp, struct file_lock *fl,
1222 struct file_lock *conflock)
1223{
1224 return posix_lock_inode(locks_inode(filp), fl, conflock);
1225}
1226EXPORT_SYMBOL(posix_lock_file);
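
/*
 * Illustrative sketch only (the function names are hypothetical): a
 * filesystem with no lock manager of its own normally leaves ->lock unset
 * and gets this behaviour automatically via vfs_lock_file(); one that does
 * define ->lock can still fall back to the generic POSIX handling:
 *
 *	static int examplefs_lock(struct file *filp, int cmd, struct file_lock *fl)
 *	{
 *		if (examplefs_has_remote_locking(filp))
 *			return examplefs_remote_lock(filp, cmd, fl);
 *		return posix_lock_file(filp, fl, NULL);
 *	}
 *
 * hooked up as .lock = examplefs_lock in its file_operations.
 */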
1227
1228
1229
1230
1231
1232
1233
1234
1235static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1236{
1237 int error;
1238 might_sleep ();
1239 for (;;) {
1240 error = posix_lock_inode(inode, fl, NULL);
1241 if (error != FILE_LOCK_DEFERRED)
1242 break;
1243 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1244 if (!error)
1245 continue;
1246
1247 locks_delete_block(fl);
1248 break;
1249 }
1250 return error;
1251}
1252
1253#ifdef CONFIG_MANDATORY_FILE_LOCKING
1254
1255
1256
1257
1258
1259
1260
1261int locks_mandatory_locked(struct file *file)
1262{
1263 int ret;
1264 struct inode *inode = locks_inode(file);
1265 struct file_lock_context *ctx;
1266 struct file_lock *fl;
1267
1268 ctx = smp_load_acquire(&inode->i_flctx);
1269 if (!ctx || list_empty_careful(&ctx->flc_posix))
1270 return 0;
1271
1272
1273
1274
1275 spin_lock(&ctx->flc_lock);
1276 ret = 0;
1277 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1278 if (fl->fl_owner != current->files &&
1279 fl->fl_owner != file) {
1280 ret = -EAGAIN;
1281 break;
1282 }
1283 }
1284 spin_unlock(&ctx->flc_lock);
1285 return ret;
1286}
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
1299 loff_t end, unsigned char type)
1300{
1301 struct file_lock fl;
1302 int error;
1303 bool sleep = false;
1304
1305 locks_init_lock(&fl);
1306 fl.fl_pid = current->tgid;
1307 fl.fl_file = filp;
1308 fl.fl_flags = FL_POSIX | FL_ACCESS;
1309 if (filp && !(filp->f_flags & O_NONBLOCK))
1310 sleep = true;
1311 fl.fl_type = type;
1312 fl.fl_start = start;
1313 fl.fl_end = end;
1314
1315 for (;;) {
1316 if (filp) {
1317 fl.fl_owner = filp;
1318 fl.fl_flags &= ~FL_SLEEP;
1319 error = posix_lock_inode(inode, &fl, NULL);
1320 if (!error)
1321 break;
1322 }
1323
1324 if (sleep)
1325 fl.fl_flags |= FL_SLEEP;
1326 fl.fl_owner = current->files;
1327 error = posix_lock_inode(inode, &fl, NULL);
1328 if (error != FILE_LOCK_DEFERRED)
1329 break;
1330 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1331 if (!error) {
1332
1333
1334
1335
1336 if (__mandatory_lock(inode))
1337 continue;
1338 }
1339
1340 locks_delete_block(&fl);
1341 break;
1342 }
1343
1344 return error;
1345}
1346
1347EXPORT_SYMBOL(locks_mandatory_area);
1348#endif
1349
1350static void lease_clear_pending(struct file_lock *fl, int arg)
1351{
1352 switch (arg) {
1353 case F_UNLCK:
1354 fl->fl_flags &= ~FL_UNLOCK_PENDING;
		/* fall through */
1356 case F_RDLCK:
1357 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1358 }
1359}
1360
1361
1362int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1363{
1364 int error = assign_type(fl, arg);
1365
1366 if (error)
1367 return error;
1368 lease_clear_pending(fl, arg);
1369 locks_wake_up_blocks(fl);
1370 if (arg == F_UNLCK) {
1371 struct file *filp = fl->fl_file;
1372
1373 f_delown(filp);
1374 filp->f_owner.signum = 0;
1375 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1376 if (fl->fl_fasync != NULL) {
1377 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1378 fl->fl_fasync = NULL;
1379 }
1380 locks_delete_lock_ctx(fl, dispose);
1381 }
1382 return 0;
1383}
1384EXPORT_SYMBOL(lease_modify);
1385
1386static bool past_time(unsigned long then)
1387{
1388 if (!then)
		/* 0 is always a valid value, meaning "no timeout set" */
1390 return false;
1391 return time_after(jiffies, then);
1392}
1393
1394static void time_out_leases(struct inode *inode, struct list_head *dispose)
1395{
1396 struct file_lock_context *ctx = inode->i_flctx;
1397 struct file_lock *fl, *tmp;
1398
1399 lockdep_assert_held(&ctx->flc_lock);
1400
1401 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1402 trace_time_out_leases(inode, fl);
1403 if (past_time(fl->fl_downgrade_time))
1404 lease_modify(fl, F_RDLCK, dispose);
1405 if (past_time(fl->fl_break_time))
1406 lease_modify(fl, F_UNLCK, dispose);
1407 }
1408}
1409
1410static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
1411{
1412 if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT))
1413 return false;
1414 if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
1415 return false;
1416 return locks_conflict(breaker, lease);
1417}
1418
1419static bool
1420any_leases_conflict(struct inode *inode, struct file_lock *breaker)
1421{
1422 struct file_lock_context *ctx = inode->i_flctx;
1423 struct file_lock *fl;
1424
1425 lockdep_assert_held(&ctx->flc_lock);
1426
1427 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1428 if (leases_conflict(fl, breaker))
1429 return true;
1430 }
1431 return false;
1432}

/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
 *	    break all leases
 * @type: FL_LEASE: break leases and delegations; FL_DELEG: break
 *	    only delegations
 *
 * break_lease (inlined for speed) has checked there already is at least
 * some kind of lock (maybe a lease) on this file.  Leases are broken on
 * a call to open() or truncate().  This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
1447int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1448{
1449 int error = 0;
1450 struct file_lock_context *ctx;
1451 struct file_lock *new_fl, *fl, *tmp;
1452 unsigned long break_time;
1453 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1454 LIST_HEAD(dispose);
1455
1456 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1457 if (IS_ERR(new_fl))
1458 return PTR_ERR(new_fl);
1459 new_fl->fl_flags = type;
1460
1461
1462 ctx = smp_load_acquire(&inode->i_flctx);
1463 if (!ctx) {
1464 WARN_ON_ONCE(1);
1465 return error;
1466 }
1467
1468 percpu_down_read_preempt_disable(&file_rwsem);
1469 spin_lock(&ctx->flc_lock);
1470
1471 time_out_leases(inode, &dispose);
1472
1473 if (!any_leases_conflict(inode, new_fl))
1474 goto out;
1475
1476 break_time = 0;
1477 if (lease_break_time > 0) {
1478 break_time = jiffies + lease_break_time * HZ;
1479 if (break_time == 0)
1480 break_time++;
1481 }
1482
1483 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1484 if (!leases_conflict(fl, new_fl))
1485 continue;
1486 if (want_write) {
1487 if (fl->fl_flags & FL_UNLOCK_PENDING)
1488 continue;
1489 fl->fl_flags |= FL_UNLOCK_PENDING;
1490 fl->fl_break_time = break_time;
1491 } else {
1492 if (lease_breaking(fl))
1493 continue;
1494 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1495 fl->fl_downgrade_time = break_time;
1496 }
1497 if (fl->fl_lmops->lm_break(fl))
1498 locks_delete_lock_ctx(fl, &dispose);
1499 }
1500
1501 if (list_empty(&ctx->flc_lease))
1502 goto out;
1503
1504 if (mode & O_NONBLOCK) {
1505 trace_break_lease_noblock(inode, new_fl);
1506 error = -EWOULDBLOCK;
1507 goto out;
1508 }
1509
1510restart:
1511 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1512 break_time = fl->fl_break_time;
1513 if (break_time != 0)
1514 break_time -= jiffies;
1515 if (break_time == 0)
1516 break_time++;
1517 locks_insert_block(fl, new_fl);
1518 trace_break_lease_block(inode, new_fl);
1519 spin_unlock(&ctx->flc_lock);
1520 percpu_up_read_preempt_enable(&file_rwsem);
1521
1522 locks_dispose_list(&dispose);
1523 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1524 !new_fl->fl_next, break_time);
1525
1526 percpu_down_read_preempt_disable(&file_rwsem);
1527 spin_lock(&ctx->flc_lock);
1528 trace_break_lease_unblock(inode, new_fl);
1529 locks_delete_block(new_fl);
1530 if (error >= 0) {
1531
1532
1533
1534
1535 if (error == 0)
1536 time_out_leases(inode, &dispose);
1537 if (any_leases_conflict(inode, new_fl))
1538 goto restart;
1539 error = 0;
1540 }
1541out:
1542 spin_unlock(&ctx->flc_lock);
1543 percpu_up_read_preempt_enable(&file_rwsem);
1544 locks_dispose_list(&dispose);
1545 locks_free_lock(new_fl);
1546 return error;
1547}
1548
1549EXPORT_SYMBOL(__break_lease);
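
/*
 * Callers normally do not invoke __break_lease() directly: the open() and
 * truncate() paths go through the break_lease()/break_deleg() wrappers in
 * <linux/fs.h>, which first check cheaply whether any lease might exist at
 * all. A minimal sketch of such a call site (illustrative only):
 *
 *	error = break_lease(inode, O_WRONLY | O_NONBLOCK);
 *	if (error)
 *		return error;
 *
 * where a non-blocking caller sees -EWOULDBLOCK while the lease break is
 * still pending.
 */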
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560void lease_get_mtime(struct inode *inode, struct timespec64 *time)
1561{
1562 bool has_lease = false;
1563 struct file_lock_context *ctx;
1564 struct file_lock *fl;
1565
1566 ctx = smp_load_acquire(&inode->i_flctx);
1567 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1568 spin_lock(&ctx->flc_lock);
1569 fl = list_first_entry_or_null(&ctx->flc_lease,
1570 struct file_lock, fl_list);
1571 if (fl && (fl->fl_type == F_WRLCK))
1572 has_lease = true;
1573 spin_unlock(&ctx->flc_lock);
1574 }
1575
1576 if (has_lease)
1577 *time = current_time(inode);
1578}
1579
1580EXPORT_SYMBOL(lease_get_mtime);
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605int fcntl_getlease(struct file *filp)
1606{
1607 struct file_lock *fl;
1608 struct inode *inode = locks_inode(filp);
1609 struct file_lock_context *ctx;
1610 int type = F_UNLCK;
1611 LIST_HEAD(dispose);
1612
1613 ctx = smp_load_acquire(&inode->i_flctx);
1614 if (ctx && !list_empty_careful(&ctx->flc_lease)) {
1615 percpu_down_read_preempt_disable(&file_rwsem);
1616 spin_lock(&ctx->flc_lock);
1617 time_out_leases(inode, &dispose);
1618 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1619 if (fl->fl_file != filp)
1620 continue;
1621 type = target_leasetype(fl);
1622 break;
1623 }
1624 spin_unlock(&ctx->flc_lock);
1625 percpu_up_read_preempt_enable(&file_rwsem);
1626
1627 locks_dispose_list(&dispose);
1628 }
1629 return type;
1630}
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643static int
1644check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
1645{
1646 int ret = 0;
1647 struct inode *inode = dentry->d_inode;
1648
1649 if (flags & FL_LAYOUT)
1650 return 0;
1651
1652 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1653 return -EAGAIN;
1654
1655 if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
1656 (atomic_read(&inode->i_count) > 1)))
1657 ret = -EAGAIN;
1658
1659 return ret;
1660}
1661
1662static int
1663generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
1664{
1665 struct file_lock *fl, *my_fl = NULL, *lease;
1666 struct dentry *dentry = filp->f_path.dentry;
1667 struct inode *inode = dentry->d_inode;
1668 struct file_lock_context *ctx;
1669 bool is_deleg = (*flp)->fl_flags & FL_DELEG;
1670 int error;
1671 LIST_HEAD(dispose);
1672
1673 lease = *flp;
1674 trace_generic_add_lease(inode, lease);
1675
1676
1677 ctx = locks_get_lock_context(inode, arg);
1678 if (!ctx)
1679 return -ENOMEM;
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689 if (is_deleg && !inode_trylock(inode))
1690 return -EAGAIN;
1691
1692 if (is_deleg && arg == F_WRLCK) {
1693
1694 inode_unlock(inode);
1695 WARN_ON_ONCE(1);
1696 return -EINVAL;
1697 }
1698
1699 percpu_down_read_preempt_disable(&file_rwsem);
1700 spin_lock(&ctx->flc_lock);
1701 time_out_leases(inode, &dispose);
1702 error = check_conflicting_open(dentry, arg, lease->fl_flags);
1703 if (error)
1704 goto out;
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714 error = -EAGAIN;
1715 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1716 if (fl->fl_file == filp &&
1717 fl->fl_owner == lease->fl_owner) {
1718 my_fl = fl;
1719 continue;
1720 }
1721
1722
1723
1724
1725
1726 if (arg == F_WRLCK)
1727 goto out;
1728
1729
1730
1731
1732 if (fl->fl_flags & FL_UNLOCK_PENDING)
1733 goto out;
1734 }
1735
1736 if (my_fl != NULL) {
1737 lease = my_fl;
1738 error = lease->fl_lmops->lm_change(lease, arg, &dispose);
1739 if (error)
1740 goto out;
1741 goto out_setup;
1742 }
1743
1744 error = -EINVAL;
1745 if (!leases_enable)
1746 goto out;
1747
1748 locks_insert_lock_ctx(lease, &ctx->flc_lease);
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758 smp_mb();
1759 error = check_conflicting_open(dentry, arg, lease->fl_flags);
1760 if (error) {
1761 locks_unlink_lock_ctx(lease);
1762 goto out;
1763 }
1764
1765out_setup:
1766 if (lease->fl_lmops->lm_setup)
1767 lease->fl_lmops->lm_setup(lease, priv);
1768out:
1769 spin_unlock(&ctx->flc_lock);
1770 percpu_up_read_preempt_enable(&file_rwsem);
1771 locks_dispose_list(&dispose);
1772 if (is_deleg)
1773 inode_unlock(inode);
1774 if (!error && !my_fl)
1775 *flp = NULL;
1776 return error;
1777}
1778
1779static int generic_delete_lease(struct file *filp, void *owner)
1780{
1781 int error = -EAGAIN;
1782 struct file_lock *fl, *victim = NULL;
1783 struct inode *inode = locks_inode(filp);
1784 struct file_lock_context *ctx;
1785 LIST_HEAD(dispose);
1786
1787 ctx = smp_load_acquire(&inode->i_flctx);
1788 if (!ctx) {
1789 trace_generic_delete_lease(inode, NULL);
1790 return error;
1791 }
1792
1793 percpu_down_read_preempt_disable(&file_rwsem);
1794 spin_lock(&ctx->flc_lock);
1795 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1796 if (fl->fl_file == filp &&
1797 fl->fl_owner == owner) {
1798 victim = fl;
1799 break;
1800 }
1801 }
1802 trace_generic_delete_lease(inode, victim);
1803 if (victim)
1804 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1805 spin_unlock(&ctx->flc_lock);
1806 percpu_up_read_preempt_enable(&file_rwsem);
1807 locks_dispose_list(&dispose);
1808 return error;
1809}
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
1823 void **priv)
1824{
1825 struct inode *inode = locks_inode(filp);
1826 int error;
1827
1828 if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
1829 return -EACCES;
1830 if (!S_ISREG(inode->i_mode))
1831 return -EINVAL;
1832 error = security_file_lock(filp, arg);
1833 if (error)
1834 return error;
1835
1836 switch (arg) {
1837 case F_UNLCK:
1838 return generic_delete_lease(filp, *priv);
1839 case F_RDLCK:
1840 case F_WRLCK:
1841 if (!(*flp)->fl_lmops->lm_break) {
1842 WARN_ON_ONCE(1);
1843 return -ENOLCK;
1844 }
1845
1846 return generic_add_lease(filp, arg, flp, priv);
1847 default:
1848 return -EINVAL;
1849 }
1850}
1851EXPORT_SYMBOL(generic_setlease);
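
/*
 * Illustrative sketch (hypothetical names): a filesystem that needs extra
 * checks or bookkeeping around leases can wrap the generic implementation
 * from its ->setlease() method:
 *
 *	static int examplefs_setlease(struct file *filp, long arg,
 *				      struct file_lock **flp, void **priv)
 *	{
 *		if (examplefs_leases_unsupported(filp))
 *			return -EINVAL;
 *		return generic_setlease(filp, arg, flp, priv);
 *	}
 *
 * Filesystems that define no ->setlease() get generic_setlease() by default
 * via vfs_setlease() below.
 */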

/**
 * vfs_setlease - sets a lease on an open file
 * @filp:	file pointer
 * @arg:	type of lease to obtain
 * @lease:	file_lock to use when adding a lease
 * @priv:	private info for lm_setup when adding a lease (may be
 *		NULL if lm_setup doesn't require it)
 *
 * Call this to establish a lease on the file. The "lease" argument is not
 * used for F_UNLCK requests and may be NULL. For commands that set or alter
 * an existing lease, the (*lease)->fl_lmops->lm_break operation must be set;
 * if not, this function will return -ENOLCK (and generate a scary-looking
 * stack trace).
 *
 * The "priv" pointer is passed directly to the lm_setup function as-is. It
 * may be NULL if the lm_setup operation doesn't require it.
 */
1870int
1871vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
1872{
1873 if (filp->f_op->setlease)
1874 return filp->f_op->setlease(filp, arg, lease, priv);
1875 else
1876 return generic_setlease(filp, arg, lease, priv);
1877}
1878EXPORT_SYMBOL_GPL(vfs_setlease);
1879
1880static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1881{
1882 struct file_lock *fl;
1883 struct fasync_struct *new;
1884 int error;
1885
1886 fl = lease_alloc(filp, arg);
1887 if (IS_ERR(fl))
1888 return PTR_ERR(fl);
1889
1890 new = fasync_alloc();
1891 if (!new) {
1892 locks_free_lock(fl);
1893 return -ENOMEM;
1894 }
1895 new->fa_fd = fd;
1896
1897 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1898 if (fl)
1899 locks_free_lock(fl);
1900 if (new)
1901 fasync_free(new);
1902 return error;
1903}
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1916{
1917 if (arg == F_UNLCK)
1918 return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
1919 return do_fcntl_add_lease(fd, filp, arg);
1920}
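
/*
 * From userspace the above is reached via fcntl(2); a typical (illustrative)
 * sequence for taking and later releasing a read lease is:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN + 1);	optional: use an RT signal
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		take a shared lease
 *	...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		drop it again
 *
 * While the lease is held, a conflicting open() or truncate() delivers the
 * signal to the lease holder and starts the lease_break_time countdown
 * handled in __break_lease().
 */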
1921
1922
1923
1924
1925
1926
1927
1928
1929static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1930{
1931 int error;
1932 might_sleep();
1933 for (;;) {
1934 error = flock_lock_inode(inode, fl);
1935 if (error != FILE_LOCK_DEFERRED)
1936 break;
1937 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1938 if (!error)
1939 continue;
1940
1941 locks_delete_block(fl);
1942 break;
1943 }
1944 return error;
1945}

/**
 * locks_lock_inode_wait - Apply a lock to an inode
 * @inode: inode of the file to apply to
 * @fl: The lock to be applied
 *
 * Apply a POSIX style or FLOCK style lock request to an inode.
 */
1954int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1955{
1956 int res = 0;
1957 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
1958 case FL_POSIX:
1959 res = posix_lock_inode_wait(inode, fl);
1960 break;
1961 case FL_FLOCK:
1962 res = flock_lock_inode_wait(inode, fl);
1963 break;
1964 default:
1965 BUG();
1966 }
1967 return res;
1968}
1969EXPORT_SYMBOL(locks_lock_inode_wait);
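
/*
 * Network filesystems typically call the locks_lock_file_wait() wrapper from
 * <linux/fs.h> (which resolves the inode and calls the function above) once
 * their server has granted a lock, so the lock is also recorded locally.
 * A minimal illustrative sketch (names hypothetical):
 *
 *	error = examplefs_send_lock_request(filp, fl);
 *	if (!error)
 *		error = locks_lock_file_wait(filp, fl);
 */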
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1989{
1990 struct fd f = fdget(fd);
1991 struct file_lock *lock;
1992 int can_sleep, unlock;
1993 int error;
1994
1995 error = -EBADF;
1996 if (!f.file)
1997 goto out;
1998
1999 can_sleep = !(cmd & LOCK_NB);
2000 cmd &= ~LOCK_NB;
2001 unlock = (cmd == LOCK_UN);
2002
2003 if (!unlock && !(cmd & LOCK_MAND) &&
2004 !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
2005 goto out_putf;
2006
2007 lock = flock_make_lock(f.file, cmd);
2008 if (IS_ERR(lock)) {
2009 error = PTR_ERR(lock);
2010 goto out_putf;
2011 }
2012
2013 if (can_sleep)
2014 lock->fl_flags |= FL_SLEEP;
2015
2016 error = security_file_lock(f.file, lock->fl_type);
2017 if (error)
2018 goto out_free;
2019
2020 if (f.file->f_op->flock)
2021 error = f.file->f_op->flock(f.file,
2022 (can_sleep) ? F_SETLKW : F_SETLK,
2023 lock);
2024 else
2025 error = locks_lock_file_wait(f.file, lock);
2026
2027 out_free:
2028 locks_free_lock(lock);
2029
2030 out_putf:
2031 fdput(f);
2032 out:
2033 return error;
2034}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044int vfs_test_lock(struct file *filp, struct file_lock *fl)
2045{
2046 if (filp->f_op->lock)
2047 return filp->f_op->lock(filp, F_GETLK, fl);
2048 posix_test_lock(filp, fl);
2049 return 0;
2050}
2051EXPORT_SYMBOL_GPL(vfs_test_lock);
2052
2053
2054
2055
2056
2057
2058
2059
2060static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2061{
2062 pid_t vnr;
2063 struct pid *pid;
2064
2065 if (IS_OFDLCK(fl))
2066 return -1;
2067 if (IS_REMOTELCK(fl))
2068 return fl->fl_pid;
2069
2070
2071
2072
2073
2074 if (ns == &init_pid_ns)
2075 return (pid_t)fl->fl_pid;
2076
2077 rcu_read_lock();
2078 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2079 vnr = pid_nr_ns(pid, ns);
2080 rcu_read_unlock();
2081 return vnr;
2082}
2083
2084static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2085{
2086 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2087#if BITS_PER_LONG == 32
2088
2089
2090
2091
2092 if (fl->fl_start > OFFT_OFFSET_MAX)
2093 return -EOVERFLOW;
2094 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2095 return -EOVERFLOW;
2096#endif
2097 flock->l_start = fl->fl_start;
2098 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2099 fl->fl_end - fl->fl_start + 1;
2100 flock->l_whence = 0;
2101 flock->l_type = fl->fl_type;
2102 return 0;
2103}
2104
2105#if BITS_PER_LONG == 32
2106static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2107{
2108 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2109 flock->l_start = fl->fl_start;
2110 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2111 fl->fl_end - fl->fl_start + 1;
2112 flock->l_whence = 0;
2113 flock->l_type = fl->fl_type;
2114}
2115#endif
2116
2117
2118
2119
2120int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
2121{
2122 struct file_lock *fl;
2123 int error;
2124
2125 fl = locks_alloc_lock();
2126 if (fl == NULL)
2127 return -ENOMEM;
2128 error = -EINVAL;
2129 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2130 goto out;
2131
2132 error = flock_to_posix_lock(filp, fl, flock);
2133 if (error)
2134 goto out;
2135
2136 if (cmd == F_OFD_GETLK) {
2137 error = -EINVAL;
2138 if (flock->l_pid != 0)
2139 goto out;
2140
2141 cmd = F_GETLK;
2142 fl->fl_flags |= FL_OFDLCK;
2143 fl->fl_owner = filp;
2144 }
2145
2146 error = vfs_test_lock(filp, fl);
2147 if (error)
2148 goto out;
2149
2150 flock->l_type = fl->fl_type;
2151 if (fl->fl_type != F_UNLCK) {
2152 error = posix_lock_to_flock(flock, fl);
2153 if (error)
2154 goto out;
2155 }
2156out:
2157 locks_free_lock(fl);
2158 return error;
2159}
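
/*
 * Note that for the F_OFD_GETLK case above l_pid must be zero on input; a
 * minimal (illustrative) userspace probe for a conflicting lock looks like:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		whole file
 *		.l_pid    = 0,		required for the OFD commands
 *	};
 *	fcntl(fd, F_OFD_GETLK, &fl);
 *
 * after which fl.l_type != F_UNLCK indicates that a conflicting lock exists
 * and fl describes it.
 */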

/**
 * vfs_lock_file - file byte range lock
 * @filp: The file to apply the lock to
 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
 * @fl: The lock to be applied
 * @conf: Place to return a copy of the conflicting lock, if found.
 *
 * A caller that doesn't care about the conflicting lock may pass NULL
 * as the final argument.
 *
 * If the filesystem defines a private ->lock() method, the request is handed
 * to it; otherwise the lock is applied locally with posix_lock_file().
 *
 * A ->lock() method may return FILE_LOCK_DEFERRED for a blocking request
 * that cannot be granted immediately; the caller (see do_lock_file_wait()
 * below) then waits on fl->fl_wait and, if the wait is interrupted, cancels
 * the request with locks_delete_block().
 */
2194int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2195{
2196 if (filp->f_op->lock)
2197 return filp->f_op->lock(filp, cmd, fl);
2198 else
2199 return posix_lock_file(filp, fl, conf);
2200}
2201EXPORT_SYMBOL_GPL(vfs_lock_file);
2202
2203static int do_lock_file_wait(struct file *filp, unsigned int cmd,
2204 struct file_lock *fl)
2205{
2206 int error;
2207
2208 error = security_file_lock(filp, fl->fl_type);
2209 if (error)
2210 return error;
2211
2212 for (;;) {
2213 error = vfs_lock_file(filp, cmd, fl, NULL);
2214 if (error != FILE_LOCK_DEFERRED)
2215 break;
2216 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
2217 if (!error)
2218 continue;
2219
2220 locks_delete_block(fl);
2221 break;
2222 }
2223
2224 return error;
2225}
2226
2227
2228static int
2229check_fmode_for_setlk(struct file_lock *fl)
2230{
2231 switch (fl->fl_type) {
2232 case F_RDLCK:
2233 if (!(fl->fl_file->f_mode & FMODE_READ))
2234 return -EBADF;
2235 break;
2236 case F_WRLCK:
2237 if (!(fl->fl_file->f_mode & FMODE_WRITE))
2238 return -EBADF;
2239 }
2240 return 0;
2241}
2242
2243
2244
2245
2246int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
2247 struct flock *flock)
2248{
2249 struct file_lock *file_lock = locks_alloc_lock();
2250 struct inode *inode = locks_inode(filp);
2251 struct file *f;
2252 int error;
2253
2254 if (file_lock == NULL)
2255 return -ENOLCK;
2256
2257
2258
2259
2260 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2261 error = -EAGAIN;
2262 goto out;
2263 }
2264
2265 error = flock_to_posix_lock(filp, file_lock, flock);
2266 if (error)
2267 goto out;
2268
2269 error = check_fmode_for_setlk(file_lock);
2270 if (error)
2271 goto out;
2272
2273
2274
2275
2276
2277 switch (cmd) {
2278 case F_OFD_SETLK:
2279 error = -EINVAL;
2280 if (flock->l_pid != 0)
2281 goto out;
2282
2283 cmd = F_SETLK;
2284 file_lock->fl_flags |= FL_OFDLCK;
2285 file_lock->fl_owner = filp;
2286 break;
2287 case F_OFD_SETLKW:
2288 error = -EINVAL;
2289 if (flock->l_pid != 0)
2290 goto out;
2291
2292 cmd = F_SETLKW;
2293 file_lock->fl_flags |= FL_OFDLCK;
2294 file_lock->fl_owner = filp;
		/* Fallthrough */
2296 case F_SETLKW:
2297 file_lock->fl_flags |= FL_SLEEP;
2298 }
2299
2300 error = do_lock_file_wait(filp, cmd, file_lock);
2301
2302
2303
2304
2305
2306
2307 if (!error && file_lock->fl_type != F_UNLCK &&
2308 !(file_lock->fl_flags & FL_OFDLCK)) {
2309
2310
2311
2312
2313
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
2317 if (f != filp) {
2318 file_lock->fl_type = F_UNLCK;
2319 error = do_lock_file_wait(filp, cmd, file_lock);
2320 WARN_ON_ONCE(error);
2321 error = -EBADF;
2322 }
2323 }
2324out:
2325 trace_fcntl_setlk(inode, file_lock, error);
2326 locks_free_lock(file_lock);
2327 return error;
2328}
2329
2330#if BITS_PER_LONG == 32
2331
2332
2333
2334int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
2335{
2336 struct file_lock *fl;
2337 int error;
2338
2339 fl = locks_alloc_lock();
2340 if (fl == NULL)
2341 return -ENOMEM;
2342
2343 error = -EINVAL;
2344 if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
2345 goto out;
2346
2347 error = flock64_to_posix_lock(filp, fl, flock);
2348 if (error)
2349 goto out;
2350
2351 if (cmd == F_OFD_GETLK) {
2352 error = -EINVAL;
2353 if (flock->l_pid != 0)
2354 goto out;
2355
2356 cmd = F_GETLK64;
2357 fl->fl_flags |= FL_OFDLCK;
2358 fl->fl_owner = filp;
2359 }
2360
2361 error = vfs_test_lock(filp, fl);
2362 if (error)
2363 goto out;
2364
2365 flock->l_type = fl->fl_type;
2366 if (fl->fl_type != F_UNLCK)
2367 posix_lock_to_flock64(flock, fl);
2368
2369out:
2370 locks_free_lock(fl);
2371 return error;
2372}
2373
2374
2375
2376
2377int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
2378 struct flock64 *flock)
2379{
2380 struct file_lock *file_lock = locks_alloc_lock();
2381 struct inode *inode = locks_inode(filp);
2382 struct file *f;
2383 int error;
2384
2385 if (file_lock == NULL)
2386 return -ENOLCK;
2387
2388
2389
2390
2391 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
2392 error = -EAGAIN;
2393 goto out;
2394 }
2395
2396 error = flock64_to_posix_lock(filp, file_lock, flock);
2397 if (error)
2398 goto out;
2399
2400 error = check_fmode_for_setlk(file_lock);
2401 if (error)
2402 goto out;
2403
2404
2405
2406
2407
2408 switch (cmd) {
2409 case F_OFD_SETLK:
2410 error = -EINVAL;
2411 if (flock->l_pid != 0)
2412 goto out;
2413
2414 cmd = F_SETLK64;
2415 file_lock->fl_flags |= FL_OFDLCK;
2416 file_lock->fl_owner = filp;
2417 break;
2418 case F_OFD_SETLKW:
2419 error = -EINVAL;
2420 if (flock->l_pid != 0)
2421 goto out;
2422
2423 cmd = F_SETLKW64;
2424 file_lock->fl_flags |= FL_OFDLCK;
2425 file_lock->fl_owner = filp;
		/* Fallthrough */
2427 case F_SETLKW64:
2428 file_lock->fl_flags |= FL_SLEEP;
2429 }
2430
2431 error = do_lock_file_wait(filp, cmd, file_lock);
2432
2433
2434
2435
2436
2437
2438 if (!error && file_lock->fl_type != F_UNLCK &&
2439 !(file_lock->fl_flags & FL_OFDLCK)) {
2440
2441
2442
2443
2444
		spin_lock(&current->files->file_lock);
		f = fcheck(fd);
		spin_unlock(&current->files->file_lock);
2448 if (f != filp) {
2449 file_lock->fl_type = F_UNLCK;
2450 error = do_lock_file_wait(filp, cmd, file_lock);
2451 WARN_ON_ONCE(error);
2452 error = -EBADF;
2453 }
2454 }
2455out:
2456 locks_free_lock(file_lock);
2457 return error;
2458}
2459#endif
2460
2461
2462
2463
2464
2465
2466void locks_remove_posix(struct file *filp, fl_owner_t owner)
2467{
2468 int error;
2469 struct inode *inode = locks_inode(filp);
2470 struct file_lock lock;
2471 struct file_lock_context *ctx;
2472
2473
2474
2475
2476
2477
2478 ctx = smp_load_acquire(&inode->i_flctx);
2479 if (!ctx || list_empty(&ctx->flc_posix))
2480 return;
2481
2482 lock.fl_type = F_UNLCK;
2483 lock.fl_flags = FL_POSIX | FL_CLOSE;
2484 lock.fl_start = 0;
2485 lock.fl_end = OFFSET_MAX;
2486 lock.fl_owner = owner;
2487 lock.fl_pid = current->tgid;
2488 lock.fl_file = filp;
2489 lock.fl_ops = NULL;
2490 lock.fl_lmops = NULL;
2491
2492 error = vfs_lock_file(filp, F_SETLK, &lock, NULL);
2493
2494 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2495 lock.fl_ops->fl_release_private(&lock);
2496 trace_locks_remove_posix(inode, &lock, error);
2497}
2498
2499EXPORT_SYMBOL(locks_remove_posix);
2500
2501
2502static void
2503locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
2504{
2505 struct file_lock fl = {
2506 .fl_owner = filp,
2507 .fl_pid = current->tgid,
2508 .fl_file = filp,
2509 .fl_flags = FL_FLOCK | FL_CLOSE,
2510 .fl_type = F_UNLCK,
2511 .fl_end = OFFSET_MAX,
2512 };
2513 struct inode *inode = locks_inode(filp);
2514
2515 if (list_empty(&flctx->flc_flock))
2516 return;
2517
2518 if (filp->f_op->flock)
2519 filp->f_op->flock(filp, F_SETLKW, &fl);
2520 else
2521 flock_lock_inode(inode, &fl);
2522
2523 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2524 fl.fl_ops->fl_release_private(&fl);
2525}
2526
2527
2528static void
2529locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
2530{
2531 struct file_lock *fl, *tmp;
2532 LIST_HEAD(dispose);
2533
2534 if (list_empty(&ctx->flc_lease))
2535 return;
2536
2537 percpu_down_read_preempt_disable(&file_rwsem);
2538 spin_lock(&ctx->flc_lock);
2539 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2540 if (filp == fl->fl_file)
2541 lease_modify(fl, F_UNLCK, &dispose);
2542 spin_unlock(&ctx->flc_lock);
2543 percpu_up_read_preempt_enable(&file_rwsem);
2544
2545 locks_dispose_list(&dispose);
2546}
2547
2548
2549
2550
2551void locks_remove_file(struct file *filp)
2552{
2553 struct file_lock_context *ctx;
2554
2555 ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
2556 if (!ctx)
2557 return;
2558
2559
2560 locks_remove_posix(filp, filp);
2561
2562
2563 locks_remove_flock(filp, ctx);
2564
2565
2566 locks_remove_lease(filp, ctx);
2567
2568 spin_lock(&ctx->flc_lock);
2569 locks_check_ctx_file_list(filp, &ctx->flc_posix, "POSIX");
2570 locks_check_ctx_file_list(filp, &ctx->flc_flock, "FLOCK");
2571 locks_check_ctx_file_list(filp, &ctx->flc_lease, "LEASE");
2572 spin_unlock(&ctx->flc_lock);
2573}
2574
2575
2576
2577
2578
2579
2580
2581int
2582posix_unblock_lock(struct file_lock *waiter)
2583{
2584 int status = 0;
2585
2586 spin_lock(&blocked_lock_lock);
2587 if (waiter->fl_next)
2588 __locks_delete_block(waiter);
2589 else
2590 status = -ENOENT;
2591 spin_unlock(&blocked_lock_lock);
2592 return status;
2593}
2594EXPORT_SYMBOL(posix_unblock_lock);
2595
2596
2597
2598
2599
2600
2601
2602
2603int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2604{
2605 if (filp->f_op->lock)
2606 return filp->f_op->lock(filp, F_CANCELLK, fl);
2607 return 0;
2608}
2609
2610EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2611
2612#ifdef CONFIG_PROC_FS
2613#include <linux/proc_fs.h>
2614#include <linux/seq_file.h>
2615
2616struct locks_iterator {
2617 int li_cpu;
2618 loff_t li_pos;
2619};
2620
2621static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2622 loff_t id, char *pfx)
2623{
2624 struct inode *inode = NULL;
2625 unsigned int fl_pid;
2626 struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2627
2628 fl_pid = locks_translate_pid(fl, proc_pidns);
2629
2630
2631
2632
2633
2634
2635 if (fl->fl_file != NULL)
2636 inode = locks_inode(fl->fl_file);
2637
2638 seq_printf(f, "%lld:%s ", id, pfx);
2639 if (IS_POSIX(fl)) {
2640 if (fl->fl_flags & FL_ACCESS)
2641 seq_puts(f, "ACCESS");
2642 else if (IS_OFDLCK(fl))
2643 seq_puts(f, "OFDLCK");
2644 else
2645 seq_puts(f, "POSIX ");
2646
2647 seq_printf(f, " %s ",
2648 (inode == NULL) ? "*NOINODE*" :
2649 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2650 } else if (IS_FLOCK(fl)) {
2651 if (fl->fl_type & LOCK_MAND) {
2652 seq_puts(f, "FLOCK MSNFS ");
2653 } else {
2654 seq_puts(f, "FLOCK ADVISORY ");
2655 }
2656 } else if (IS_LEASE(fl)) {
2657 if (fl->fl_flags & FL_DELEG)
2658 seq_puts(f, "DELEG ");
2659 else
2660 seq_puts(f, "LEASE ");
2661
2662 if (lease_breaking(fl))
2663 seq_puts(f, "BREAKING ");
2664 else if (fl->fl_file)
2665 seq_puts(f, "ACTIVE ");
2666 else
2667 seq_puts(f, "BREAKER ");
2668 } else {
2669 seq_puts(f, "UNKNOWN UNKNOWN ");
2670 }
2671 if (fl->fl_type & LOCK_MAND) {
2672 seq_printf(f, "%s ",
2673 (fl->fl_type & LOCK_READ)
2674 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2675 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2676 } else {
2677 seq_printf(f, "%s ",
2678 (lease_breaking(fl))
2679 ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
2680 : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
2681 }
2682 if (inode) {
2683
2684 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2685 MAJOR(inode->i_sb->s_dev),
2686 MINOR(inode->i_sb->s_dev), inode->i_ino);
2687 } else {
2688 seq_printf(f, "%d <none>:0 ", fl_pid);
2689 }
2690 if (IS_POSIX(fl)) {
2691 if (fl->fl_end == OFFSET_MAX)
2692 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2693 else
2694 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2695 } else {
2696 seq_puts(f, "0 EOF\n");
2697 }
2698}
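
/*
 * The resulting /proc/locks lines look roughly like this (illustrative):
 *
 *	1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
 *	2: FLOCK  ADVISORY  WRITE 5678 08:01:131091 0 EOF
 *	2: -> FLOCK  ADVISORY  WRITE 9012 08:01:131091 0 EOF
 *
 * i.e. id, lock class, advisory/mandatory, access, pid, device:inode and the
 * byte range ("EOF" for a lock running to the end of the file). Lines
 * prefixed with "->" (emitted from locks_show() below) are blocked waiters
 * queued behind the lock above them.
 */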
2699
2700static int locks_show(struct seq_file *f, void *v)
2701{
2702 struct locks_iterator *iter = f->private;
2703 struct file_lock *fl, *bfl;
2704 struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
2705
2706 fl = hlist_entry(v, struct file_lock, fl_link);
2707
2708 if (locks_translate_pid(fl, proc_pidns) == 0)
2709 return 0;
2710
2711 lock_get_status(f, fl, iter->li_pos, "");
2712
2713 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2714 lock_get_status(f, bfl, iter->li_pos, " ->");
2715
2716 return 0;
2717}
2718
2719static void __show_fd_locks(struct seq_file *f,
2720 struct list_head *head, int *id,
2721 struct file *filp, struct files_struct *files)
2722{
2723 struct file_lock *fl;
2724
2725 list_for_each_entry(fl, head, fl_list) {
2726
2727 if (filp != fl->fl_file)
2728 continue;
2729 if (fl->fl_owner != files &&
2730 fl->fl_owner != filp)
2731 continue;
2732
2733 (*id)++;
2734 seq_puts(f, "lock:\t");
2735 lock_get_status(f, fl, *id, "");
2736 }
2737}
2738
2739void show_fd_locks(struct seq_file *f,
2740 struct file *filp, struct files_struct *files)
2741{
2742 struct inode *inode = locks_inode(filp);
2743 struct file_lock_context *ctx;
2744 int id = 0;
2745
2746 ctx = smp_load_acquire(&inode->i_flctx);
2747 if (!ctx)
2748 return;
2749
2750 spin_lock(&ctx->flc_lock);
2751 __show_fd_locks(f, &ctx->flc_flock, &id, filp, files);
2752 __show_fd_locks(f, &ctx->flc_posix, &id, filp, files);
2753 __show_fd_locks(f, &ctx->flc_lease, &id, filp, files);
2754 spin_unlock(&ctx->flc_lock);
2755}
2756
2757static void *locks_start(struct seq_file *f, loff_t *pos)
2758 __acquires(&blocked_lock_lock)
2759{
2760 struct locks_iterator *iter = f->private;
2761
2762 iter->li_pos = *pos + 1;
2763 percpu_down_write(&file_rwsem);
2764 spin_lock(&blocked_lock_lock);
2765 return seq_hlist_start_percpu(&file_lock_list.hlist, &iter->li_cpu, *pos);
2766}
2767
2768static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2769{
2770 struct locks_iterator *iter = f->private;
2771
2772 ++iter->li_pos;
2773 return seq_hlist_next_percpu(v, &file_lock_list.hlist, &iter->li_cpu, pos);
2774}
2775
2776static void locks_stop(struct seq_file *f, void *v)
2777 __releases(&blocked_lock_lock)
2778{
2779 spin_unlock(&blocked_lock_lock);
2780 percpu_up_write(&file_rwsem);
2781}
2782
2783static const struct seq_operations locks_seq_operations = {
2784 .start = locks_start,
2785 .next = locks_next,
2786 .stop = locks_stop,
2787 .show = locks_show,
2788};
2789
2790static int __init proc_locks_init(void)
2791{
2792 proc_create_seq_private("locks", 0, NULL, &locks_seq_operations,
2793 sizeof(struct locks_iterator), NULL);
2794 return 0;
2795}
2796fs_initcall(proc_locks_init);
2797#endif
2798
2799static int __init filelock_init(void)
2800{
2801 int i;
2802
2803 flctx_cache = kmem_cache_create("file_lock_ctx",
2804 sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
2805
2806 filelock_cache = kmem_cache_create("file_lock_cache",
2807 sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
2808
2809
2810 for_each_possible_cpu(i) {
2811 struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
2812
2813 spin_lock_init(&fll->lock);
2814 INIT_HLIST_HEAD(&fll->hlist);
2815 }
2816
2817 return 0;
2818}
2819
2820core_initcall(filelock_init);
2821