#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * protection.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
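
/*
 * Illustrative usage sketch (not a real caller; "src" and "dst" are
 * hypothetical names): a caller needing both list_locks simply does
 *
 *	bdi_lock_two(&src->wb, &dst->wb);
 *	... move inodes between the two writeback lists ...
 *	spin_unlock(&src->wb.list_lock);
 *	spin_unlock(&dst->wb.list_lock);
 *
 * Because the lower-addressed lock is always taken first, two tasks locking
 * the same pair in opposite order cannot ABBA-deadlock. bdi_destroy() below
 * uses exactly this pattern when splicing leftover dirty inodes.
 */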

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
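
/*
 * With CONFIG_DEBUG_FS enabled, the counters above are readable from
 * userspace under <debugfs mountpoint>/bdi/<device name>/stats, e.g.
 * (device name and values are illustrative only):
 *
 *	# cat /sys/kernel/debug/bdi/8:0/stats
 *	BdiWriteback:             1024 kB
 *	BdiReclaimable:          16384 kB
 *	DirtyThresh:            409600 kB
 *	...
 */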

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}
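
/*
 * The store above backs a writable sysfs attribute; since bdi devices live
 * in the "bdi" class, tuning readahead looks like (device name is
 * illustrative):
 *
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * which converts 512 kB to pages (512 >> (PAGE_SHIFT - 10)) for ->ra_pages.
 */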

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
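
/*
 * min_ratio/max_ratio bound this bdi's share of the global dirty threshold:
 * e.g. "echo 10 > max_ratio" caps the device at roughly 10% of the total
 * dirty limit. The setters live in mm/page-writeback.c and return -EINVAL
 * for out-of-range values, which the stores above pass back to userspace.
 */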

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * The sync_supers kernel thread: sleeps until sync_supers_timer_fn() wakes
 * it, then writes back dirty superblocks. kupdated() used to do this work;
 * it cannot be done from bdi_forker_thread() without risking a deadlock
 * on ->s_umount.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	/* dirty_writeback_interval is in centiseconds, hence the "* 10" */
	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else if (bdi->dev) {
		/*
		 * When bdi threads are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread,
		 * which will create and run a new bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * arm the wakeup_timer instead of waking the thread up immediately; the
 * timer is often re-armed or deleted before it ever fires.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
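
/*
 * Typical caller (illustrative, from the writeback path): when
 * __mark_inode_dirty() dirties the first inode on a bdi, it does roughly
 *
 *	if (wakeup_bdi)
 *		bdi_wakeup_thread_delayed(bdi);
 *
 * deferring the actual wakeup to wakeup_timer_fn() above.
 */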

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive before the forker thread kills them.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}

/*
 * Clear pending bit and wake up anybody waiting to tear us down.
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * We are the thread of the default bdi, so do its writeback
		 * ourselves before looking after everybody else.
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);

		/*
		 * In the following loop we are going to check whether we have
		 * some work to do without any synchronization with tasks
		 * waking us up to do work for them. Set the task state here
		 * so that we don't miss wakeups after verifying conditions.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi - it'll wait on this bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the forker thread. Hopefully
				 * 1024 is large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			bdi_clear_pending(bdi);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			bdi_clear_pending(bdi);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There are no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			break;
		}
	}

	return 0;
}
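
/*
 * To summarize the loop above: the default bdi's thread triples as (1) the
 * flusher for the default bdi itself, (2) the forker that creates a
 * "flush-<dev>" thread for any bdi with dirty IO but no thread, and (3) the
 * reaper that kills threads idle for longer than bdi_longest_inactive().
 */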

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
				       dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
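
/*
 * Usage sketch (hypothetical block driver; the bdi must already have been
 * through bdi_init()):
 *
 *	err = bdi_register_dev(&my_bdi, MKDEV(my_major, 0));
 *
 * This registers the bdi under the canonical "major:minor" name, matching
 * how block devices appear in /sys/class/bdi/.
 */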

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct task_struct *task;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	spin_lock_bh(&bdi->wb_lock);
	task = bdi->wb.task;
	bdi->wb.task = NULL;
	spin_unlock_bh(&bdi->wb_lock);

	if (task)
		kthread_stop(task);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	struct device *dev = bdi->dev;

	if (dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);

		spin_lock_bh(&bdi->wb_lock);
		bdi->dev = NULL;
		spin_unlock_bh(&bdi->wb_lock);

		device_unregister(dev);
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);
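
/*
 * Lifecycle sketch (hypothetical caller; error handling abbreviated, see
 * default_bdi_init() above for a real in-file example):
 *
 *	err = bdi_init(&my_bdi);
 *	if (!err)
 *		err = bdi_register(&my_bdi, NULL, "mydev");
 *	...
 *	bdi_destroy(&my_bdi);
 *
 * A single bdi_destroy() suffices for teardown: it calls bdi_unregister()
 * itself (see below).
 */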

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the
	 * wakeup_timer could still be armed because bdi_prune_sb()
	 * can race with the bdi_wakeup_thread_delayed() calls from
	 * __mark_inode_dirty().
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
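
/*
 * Usage sketch (hypothetical filesystem fill_super; names are illustrative):
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * The "-%d" suffix combined with bdi_seq yields unique names: "myfs-1",
 * "myfs-2", and so on.
 */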

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
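
/*
 * Typical reclaim-path usage (illustrative): back off briefly while writes
 * drain, returning early as soon as a bdi clears congestion:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/50);
 *
 * BLK_RW_SYNC/BLK_RW_ASYNC (from backing-dev.h) select which congestion
 * queue to wait on.
 */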

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);