// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/bio.h>
11#include <linux/blkdev.h>
12#include <linux/prefetch.h>
13#include <linux/kthread.h>
14#include <linux/swap.h>
15#include <linux/timer.h>
16#include <linux/freezer.h>
17#include <linux/sched/signal.h>
18
19#include "f2fs.h"
20#include "segment.h"
21#include "node.h"
22#include "gc.h"
23#include <trace/events/f2fs.h>
24
25#define __reverse_ffz(x) __reverse_ffs(~(x))
26
27static struct kmem_cache *discard_entry_slab;
28static struct kmem_cache *discard_cmd_slab;
29static struct kmem_cache *sit_entry_set_slab;
30static struct kmem_cache *inmem_entry_slab;
31
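/*
 * Build a word from raw bitmap bytes with the lowest-addressed byte placed
 * in the most significant position, so the MSB<->LSB-reversed bit order
 * produced by f2fs_set_bit() can be scanned with the plain shifts below.
 */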
32static unsigned long __reverse_ulong(unsigned char *str)
33{
34 unsigned long tmp = 0;
35 int shift = 24, idx = 0;
36
37#if BITS_PER_LONG == 64
38 shift = 56;
39#endif
40 while (shift >= 0) {
41 tmp |= (unsigned long)str[idx++] << shift;
42 shift -= BITS_PER_BYTE;
43 }
44 return tmp;
45}
46
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
51static inline unsigned long __reverse_ffs(unsigned long word)
52{
53 int num = 0;
54
55#if BITS_PER_LONG == 64
56 if ((word & 0xffffffff00000000UL) == 0)
57 num += 32;
58 else
59 word >>= 32;
60#endif
61 if ((word & 0xffff0000) == 0)
62 num += 16;
63 else
64 word >>= 16;
65
66 if ((word & 0xff00) == 0)
67 num += 8;
68 else
69 word >>= 8;
70
71 if ((word & 0xf0) == 0)
72 num += 4;
73 else
74 word >>= 4;
75
76 if ((word & 0xc) == 0)
77 num += 2;
78 else
79 word >>= 2;
80
81 if ((word & 0x2) == 0)
82 num += 1;
83 return num;
84}
85
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
95static unsigned long __find_rev_next_bit(const unsigned long *addr,
96 unsigned long size, unsigned long offset)
97{
98 const unsigned long *p = addr + BIT_WORD(offset);
99 unsigned long result = size;
100 unsigned long tmp;
101
102 if (offset >= size)
103 return size;
104
105 size -= (offset & ~(BITS_PER_LONG - 1));
106 offset %= BITS_PER_LONG;
107
108 while (1) {
109 if (*p == 0)
110 goto pass;
111
112 tmp = __reverse_ulong((unsigned char *)p);
113
114 tmp &= ~0UL >> offset;
115 if (size < BITS_PER_LONG)
116 tmp &= (~0UL << (BITS_PER_LONG - size));
117 if (tmp)
118 goto found;
119pass:
120 if (size <= BITS_PER_LONG)
121 break;
122 size -= BITS_PER_LONG;
123 offset = 0;
124 p++;
125 }
126 return result;
127found:
128 return result - size + __reverse_ffs(tmp);
129}
130
131static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
132 unsigned long size, unsigned long offset)
133{
134 const unsigned long *p = addr + BIT_WORD(offset);
135 unsigned long result = size;
136 unsigned long tmp;
137
138 if (offset >= size)
139 return size;
140
141 size -= (offset & ~(BITS_PER_LONG - 1));
142 offset %= BITS_PER_LONG;
143
144 while (1) {
145 if (*p == ~0UL)
146 goto pass;
147
148 tmp = __reverse_ulong((unsigned char *)p);
149
150 if (offset)
151 tmp |= ~0UL << (BITS_PER_LONG - offset);
152 if (size < BITS_PER_LONG)
153 tmp |= ~0UL >> size;
154 if (tmp != ~0UL)
155 goto found;
156pass:
157 if (size <= BITS_PER_LONG)
158 break;
159 size -= BITS_PER_LONG;
160 offset = 0;
161 p++;
162 }
163 return result;
164found:
165 return result - size + __reverse_ffz(tmp);
166}
167
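/*
 * Decide whether allocation should fall back to SSR (slack space recycling,
 * i.e. reusing invalid blocks of dirty segments): never in LFS mode, always
 * under urgent GC or while checkpointing is disabled, and otherwise only
 * when free sections run low against the dirty node/dentry/imeta load plus
 * the reserved and min_ssr thresholds.
 */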
168bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
169{
170 int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
171 int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
172 int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
173
174 if (f2fs_lfs_mode(sbi))
175 return false;
176 if (sbi->gc_mode == GC_URGENT_HIGH)
177 return true;
178 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
179 return true;
180
181 return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
182 SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
183}
184
185void f2fs_register_inmem_page(struct inode *inode, struct page *page)
186{
187 struct inmem_pages *new;
188
189 set_page_private_atomic(page);
190
191 new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
192
	/* add atomic page indices to the list */
194 new->page = page;
195 INIT_LIST_HEAD(&new->list);
196
	/* increase reference count with clean state */
198 get_page(page);
199 mutex_lock(&F2FS_I(inode)->inmem_lock);
200 list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
201 inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
202 mutex_unlock(&F2FS_I(inode)->inmem_lock);
203
204 trace_f2fs_register_inmem_page(page, INMEM);
205}
206
207static int __revoke_inmem_pages(struct inode *inode,
208 struct list_head *head, bool drop, bool recover,
209 bool trylock)
210{
211 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
212 struct inmem_pages *cur, *tmp;
213 int err = 0;
214
215 list_for_each_entry_safe(cur, tmp, head, list) {
216 struct page *page = cur->page;
217
218 if (drop)
219 trace_f2fs_commit_inmem_page(page, INMEM_DROP);
220
221 if (trylock) {
			/*
			 * to avoid deadlock between the page lock and
			 * inmem_lock.
			 */
226 if (!trylock_page(page))
227 continue;
228 } else {
229 lock_page(page);
230 }
231
232 f2fs_wait_on_page_writeback(page, DATA, true, true);
233
234 if (recover) {
235 struct dnode_of_data dn;
236 struct node_info ni;
237
238 trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
239retry:
240 set_new_dnode(&dn, inode, NULL, NULL, 0);
241 err = f2fs_get_dnode_of_data(&dn, page->index,
242 LOOKUP_NODE);
243 if (err) {
244 if (err == -ENOMEM) {
245 congestion_wait(BLK_RW_ASYNC,
246 DEFAULT_IO_TIMEOUT);
247 cond_resched();
248 goto retry;
249 }
250 err = -EAGAIN;
251 goto next;
252 }
253
254 err = f2fs_get_node_info(sbi, dn.nid, &ni);
255 if (err) {
256 f2fs_put_dnode(&dn);
257 return err;
258 }
259
260 if (cur->old_addr == NEW_ADDR) {
261 f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
262 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
263 } else
264 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
265 cur->old_addr, ni.version, true, true);
266 f2fs_put_dnode(&dn);
267 }
268next:
		/* we don't need to invalidate this in the successful status */
270 if (drop || recover) {
271 ClearPageUptodate(page);
272 clear_page_private_gcing(page);
273 }
274 detach_page_private(page);
275 set_page_private(page, 0);
276 f2fs_put_page(page, 1);
277
278 list_del(&cur->list);
279 kmem_cache_free(inmem_entry_slab, cur);
280 dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
281 }
282 return err;
283}
284
285void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
286{
287 struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
288 struct inode *inode;
289 struct f2fs_inode_info *fi;
290 unsigned int count = sbi->atomic_files;
291 unsigned int looped = 0;
292next:
293 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
294 if (list_empty(head)) {
295 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
296 return;
297 }
298 fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
299 inode = igrab(&fi->vfs_inode);
300 if (inode)
301 list_move_tail(&fi->inmem_ilist, head);
302 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
303
304 if (inode) {
305 if (gc_failure) {
306 if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
307 goto skip;
308 }
309 set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
310 f2fs_drop_inmem_pages(inode);
311skip:
312 iput(inode);
313 }
314 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
315 cond_resched();
316 if (gc_failure) {
317 if (++looped >= count)
318 return;
319 }
320 goto next;
321}
322
323void f2fs_drop_inmem_pages(struct inode *inode)
324{
325 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
326 struct f2fs_inode_info *fi = F2FS_I(inode);
327
328 do {
329 mutex_lock(&fi->inmem_lock);
330 if (list_empty(&fi->inmem_pages)) {
331 fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
332
333 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
334 if (!list_empty(&fi->inmem_ilist))
335 list_del_init(&fi->inmem_ilist);
336 if (f2fs_is_atomic_file(inode)) {
337 clear_inode_flag(inode, FI_ATOMIC_FILE);
338 sbi->atomic_files--;
339 }
340 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
341
342 mutex_unlock(&fi->inmem_lock);
343 break;
344 }
345 __revoke_inmem_pages(inode, &fi->inmem_pages,
346 true, false, true);
347 mutex_unlock(&fi->inmem_lock);
348 } while (1);
349}
350
351void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
352{
353 struct f2fs_inode_info *fi = F2FS_I(inode);
354 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
355 struct list_head *head = &fi->inmem_pages;
356 struct inmem_pages *cur = NULL;
357
358 f2fs_bug_on(sbi, !page_private_atomic(page));
359
360 mutex_lock(&fi->inmem_lock);
361 list_for_each_entry(cur, head, list) {
362 if (cur->page == page)
363 break;
364 }
365
366 f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
367 list_del(&cur->list);
368 mutex_unlock(&fi->inmem_lock);
369
370 dec_page_count(sbi, F2FS_INMEM_PAGES);
371 kmem_cache_free(inmem_entry_slab, cur);
372
373 ClearPageUptodate(page);
374 clear_page_private_atomic(page);
375 f2fs_put_page(page, 0);
376
377 detach_page_private(page);
378 set_page_private(page, 0);
379
380 trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
381}
382
383static int __f2fs_commit_inmem_pages(struct inode *inode)
384{
385 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
386 struct f2fs_inode_info *fi = F2FS_I(inode);
387 struct inmem_pages *cur, *tmp;
388 struct f2fs_io_info fio = {
389 .sbi = sbi,
390 .ino = inode->i_ino,
391 .type = DATA,
392 .op = REQ_OP_WRITE,
393 .op_flags = REQ_SYNC | REQ_PRIO,
394 .io_type = FS_DATA_IO,
395 };
396 struct list_head revoke_list;
397 bool submit_bio = false;
398 int err = 0;
399
400 INIT_LIST_HEAD(&revoke_list);
401
402 list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
403 struct page *page = cur->page;
404
405 lock_page(page);
406 if (page->mapping == inode->i_mapping) {
407 trace_f2fs_commit_inmem_page(page, INMEM);
408
409 f2fs_wait_on_page_writeback(page, DATA, true, true);
410
411 set_page_dirty(page);
412 if (clear_page_dirty_for_io(page)) {
413 inode_dec_dirty_pages(inode);
414 f2fs_remove_dirty_inode(inode);
415 }
416retry:
417 fio.page = page;
418 fio.old_blkaddr = NULL_ADDR;
419 fio.encrypted_page = NULL;
420 fio.need_lock = LOCK_DONE;
421 err = f2fs_do_write_data_page(&fio);
422 if (err) {
423 if (err == -ENOMEM) {
424 congestion_wait(BLK_RW_ASYNC,
425 DEFAULT_IO_TIMEOUT);
426 cond_resched();
427 goto retry;
428 }
429 unlock_page(page);
430 break;
431 }
432
433 cur->old_addr = fio.old_blkaddr;
434 submit_bio = true;
435 }
436 unlock_page(page);
437 list_move_tail(&cur->list, &revoke_list);
438 }
439
440 if (submit_bio)
441 f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);
442
443 if (err) {
		/*
		 * try to revoke all committed pages; we could still fail due
		 * to lack of memory or some other reason, in which case
		 * -EAGAIN is returned and the transaction is no longer
		 * intact, so the caller should use the journal to recover or
		 * rewrite & commit the last transaction. For any other error,
		 * revoking was already done by the filesystem itself.
		 */
452 err = __revoke_inmem_pages(inode, &revoke_list,
453 false, true, false);
454
		/* drop all uncommitted pages */
456 __revoke_inmem_pages(inode, &fi->inmem_pages,
457 true, false, false);
458 } else {
459 __revoke_inmem_pages(inode, &revoke_list,
460 false, false, false);
461 }
462
463 return err;
464}
465
466int f2fs_commit_inmem_pages(struct inode *inode)
467{
468 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
469 struct f2fs_inode_info *fi = F2FS_I(inode);
470 int err;
471
472 f2fs_balance_fs(sbi, true);
473
474 down_write(&fi->i_gc_rwsem[WRITE]);
475
476 f2fs_lock_op(sbi);
477 set_inode_flag(inode, FI_ATOMIC_COMMIT);
478
479 mutex_lock(&fi->inmem_lock);
480 err = __f2fs_commit_inmem_pages(inode);
481 mutex_unlock(&fi->inmem_lock);
482
483 clear_inode_flag(inode, FI_ATOMIC_COMMIT);
484
485 f2fs_unlock_op(sbi);
486 up_write(&fi->i_gc_rwsem[WRITE]);
487
488 return err;
489}
490
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
495void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
496{
497 if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
498 f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
499 f2fs_stop_checkpoint(sbi, false);
500 }
501
	/* the background balancing work is allowed to stay pending */
503 if (need && excess_cached_nats(sbi))
504 f2fs_balance_fs_bg(sbi, false);
505
506 if (!f2fs_is_checkpoint_ready(sbi))
507 return;
508
	/*
	 * We should do GC, or end up with a checkpoint, if there are too many
	 * dirty dir/node pages without enough free segments.
	 */
513 if (has_not_enough_free_secs(sbi, 0, 0)) {
514 if (test_opt(sbi, GC_MERGE) && sbi->gc_thread &&
515 sbi->gc_thread->f2fs_gc_task) {
516 DEFINE_WAIT(wait);
517
518 prepare_to_wait(&sbi->gc_thread->fggc_wq, &wait,
519 TASK_UNINTERRUPTIBLE);
520 wake_up(&sbi->gc_thread->gc_wait_queue_head);
521 io_schedule();
522 finish_wait(&sbi->gc_thread->fggc_wq, &wait);
523 } else {
524 down_write(&sbi->gc_lock);
525 f2fs_gc(sbi, false, false, false, NULL_SEGNO);
526 }
527 }
528}
529
530void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
531{
532 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
533 return;
534
	/* try to shrink the extent cache when there is not enough memory */
536 if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
537 f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
538
	/* check the # of cached NAT entries */
540 if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
541 f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
542
543 if (!f2fs_available_free_memory(sbi, FREE_NIDS))
544 f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
545 else
546 f2fs_build_free_nids(sbi, false, false);
547
548 if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
549 excess_prefree_segs(sbi))
550 goto do_sync;
551
	/* there is background in-flight IO or a recent foreground operation */
553 if (is_inflight_io(sbi, REQ_TIME) ||
554 (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
555 return;
556
	/* the periodic checkpoint timeout threshold has been exceeded */
558 if (f2fs_time_over(sbi, CP_TIME))
559 goto do_sync;
560
	/* checkpoint is the only way to shrink partially cached entries */
562 if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
563 f2fs_available_free_memory(sbi, INO_ENTRIES))
564 return;
565
566do_sync:
567 if (test_opt(sbi, DATA_FLUSH) && from_bg) {
568 struct blk_plug plug;
569
570 mutex_lock(&sbi->flush_lock);
571
572 blk_start_plug(&plug);
573 f2fs_sync_dirty_inodes(sbi, FILE_INODE);
574 blk_finish_plug(&plug);
575
576 mutex_unlock(&sbi->flush_lock);
577 }
578 f2fs_sync_fs(sbi->sb, true);
579 stat_inc_bg_cp_count(sbi->stat_info);
580}
581
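/* issue a cache flush to a single block device and trace the result */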
582static int __submit_flush_wait(struct f2fs_sb_info *sbi,
583 struct block_device *bdev)
584{
585 int ret = blkdev_issue_flush(bdev);
586
587 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
588 test_opt(sbi, FLUSH_MERGE), ret);
589 return ret;
590}
591
592static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
593{
594 int ret = 0;
595 int i;
596
597 if (!f2fs_is_multi_device(sbi))
598 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
599
600 for (i = 0; i < sbi->s_ndevs; i++) {
601 if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
602 continue;
603 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
604 if (ret)
605 break;
606 }
607 return ret;
608}
609
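/*
 * Flush-merge worker: drain the lock-free issue_list, submit one device
 * flush on behalf of every queued waiter, and complete each waiter with the
 * shared result.
 */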
610static int issue_flush_thread(void *data)
611{
612 struct f2fs_sb_info *sbi = data;
613 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
614 wait_queue_head_t *q = &fcc->flush_wait_queue;
615repeat:
616 if (kthread_should_stop())
617 return 0;
618
619 if (!llist_empty(&fcc->issue_list)) {
620 struct flush_cmd *cmd, *next;
621 int ret;
622
623 fcc->dispatch_list = llist_del_all(&fcc->issue_list);
624 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
625
626 cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);
627
628 ret = submit_flush_wait(sbi, cmd->ino);
629 atomic_inc(&fcc->issued_flush);
630
631 llist_for_each_entry_safe(cmd, next,
632 fcc->dispatch_list, llnode) {
633 cmd->ret = ret;
634 complete(&cmd->wait);
635 }
636 fcc->dispatch_list = NULL;
637 }
638
639 wait_event_interruptible(*q,
640 kthread_should_stop() || !llist_empty(&fcc->issue_list));
641 goto repeat;
642}
643
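/*
 * Issue a flush for @ino. With FLUSH_MERGE enabled, concurrent callers are
 * queued on fcc->issue_list so that a single device flush can satisfy many
 * waiters; otherwise each caller submits its own flush.
 */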
644int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
645{
646 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
647 struct flush_cmd cmd;
648 int ret;
649
650 if (test_opt(sbi, NOBARRIER))
651 return 0;
652
653 if (!test_opt(sbi, FLUSH_MERGE)) {
654 atomic_inc(&fcc->queued_flush);
655 ret = submit_flush_wait(sbi, ino);
656 atomic_dec(&fcc->queued_flush);
657 atomic_inc(&fcc->issued_flush);
658 return ret;
659 }
660
661 if (atomic_inc_return(&fcc->queued_flush) == 1 ||
662 f2fs_is_multi_device(sbi)) {
663 ret = submit_flush_wait(sbi, ino);
664 atomic_dec(&fcc->queued_flush);
665
666 atomic_inc(&fcc->issued_flush);
667 return ret;
668 }
669
670 cmd.ino = ino;
671 init_completion(&cmd.wait);
672
673 llist_add(&cmd.llnode, &fcc->issue_list);
674
	/*
	 * Make the issue_list insertion visible before checking
	 * waitqueue_active(), so that a flush thread about to go to sleep
	 * cannot miss this command.
	 */
680 smp_mb();
681
682 if (waitqueue_active(&fcc->flush_wait_queue))
683 wake_up(&fcc->flush_wait_queue);
684
685 if (fcc->f2fs_issue_flush) {
686 wait_for_completion(&cmd.wait);
687 atomic_dec(&fcc->queued_flush);
688 } else {
689 struct llist_node *list;
690
691 list = llist_del_all(&fcc->issue_list);
692 if (!list) {
693 wait_for_completion(&cmd.wait);
694 atomic_dec(&fcc->queued_flush);
695 } else {
696 struct flush_cmd *tmp, *next;
697
698 ret = submit_flush_wait(sbi, ino);
699
700 llist_for_each_entry_safe(tmp, next, list, llnode) {
701 if (tmp == &cmd) {
702 cmd.ret = ret;
703 atomic_dec(&fcc->queued_flush);
704 continue;
705 }
706 tmp->ret = ret;
707 complete(&tmp->wait);
708 }
709 }
710 }
711
712 return cmd.ret;
713}
714
715int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
716{
717 dev_t dev = sbi->sb->s_bdev->bd_dev;
718 struct flush_cmd_control *fcc;
719 int err = 0;
720
721 if (SM_I(sbi)->fcc_info) {
722 fcc = SM_I(sbi)->fcc_info;
723 if (fcc->f2fs_issue_flush)
724 return err;
725 goto init_thread;
726 }
727
728 fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
729 if (!fcc)
730 return -ENOMEM;
731 atomic_set(&fcc->issued_flush, 0);
732 atomic_set(&fcc->queued_flush, 0);
733 init_waitqueue_head(&fcc->flush_wait_queue);
734 init_llist_head(&fcc->issue_list);
735 SM_I(sbi)->fcc_info = fcc;
736 if (!test_opt(sbi, FLUSH_MERGE))
737 return err;
738
739init_thread:
740 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
741 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
742 if (IS_ERR(fcc->f2fs_issue_flush)) {
743 err = PTR_ERR(fcc->f2fs_issue_flush);
744 kfree(fcc);
745 SM_I(sbi)->fcc_info = NULL;
746 return err;
747 }
748
749 return err;
750}
751
752void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
753{
754 struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
755
756 if (fcc && fcc->f2fs_issue_flush) {
757 struct task_struct *flush_thread = fcc->f2fs_issue_flush;
758
759 fcc->f2fs_issue_flush = NULL;
760 kthread_stop(flush_thread);
761 }
762 if (free) {
763 kfree(fcc);
764 SM_I(sbi)->fcc_info = NULL;
765 }
766}
767
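/*
 * For multi-device setups, flush the write cache of every extra device
 * whose bit is set in sbi->dirty_device, clearing the bit on success.
 */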
768int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
769{
770 int ret = 0, i;
771
772 if (!f2fs_is_multi_device(sbi))
773 return 0;
774
775 if (test_opt(sbi, NOBARRIER))
776 return 0;
777
778 for (i = 1; i < sbi->s_ndevs; i++) {
779 if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
780 continue;
781 ret = __submit_flush_wait(sbi, FDEV(i).bdev);
782 if (ret)
783 break;
784
785 spin_lock(&sbi->dev_lock);
786 f2fs_clear_bit(i, (char *)&sbi->dirty_device);
787 spin_unlock(&sbi->dev_lock);
788 }
789
790 return ret;
791}
792
793static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
794 enum dirty_type dirty_type)
795{
796 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
797
	/* current segments need not be added to the dirty list */
799 if (IS_CURSEG(sbi, segno))
800 return;
801
802 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
803 dirty_i->nr_dirty[dirty_type]++;
804
805 if (dirty_type == DIRTY) {
806 struct seg_entry *sentry = get_seg_entry(sbi, segno);
807 enum dirty_type t = sentry->type;
808
809 if (unlikely(t >= DIRTY)) {
810 f2fs_bug_on(sbi, 1);
811 return;
812 }
813 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
814 dirty_i->nr_dirty[t]++;
815
816 if (__is_large_section(sbi)) {
817 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
818 block_t valid_blocks =
819 get_valid_blocks(sbi, segno, true);
820
821 f2fs_bug_on(sbi, unlikely(!valid_blocks ||
822 valid_blocks == BLKS_PER_SEC(sbi)));
823
824 if (!IS_CURSEC(sbi, secno))
825 set_bit(secno, dirty_i->dirty_secmap);
826 }
827 }
828}
829
830static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
831 enum dirty_type dirty_type)
832{
833 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
834 block_t valid_blocks;
835
836 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
837 dirty_i->nr_dirty[dirty_type]--;
838
839 if (dirty_type == DIRTY) {
840 struct seg_entry *sentry = get_seg_entry(sbi, segno);
841 enum dirty_type t = sentry->type;
842
843 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
844 dirty_i->nr_dirty[t]--;
845
846 valid_blocks = get_valid_blocks(sbi, segno, true);
847 if (valid_blocks == 0) {
848 clear_bit(GET_SEC_FROM_SEG(sbi, segno),
849 dirty_i->victim_secmap);
850#ifdef CONFIG_F2FS_CHECK_FS
851 clear_bit(segno, SIT_I(sbi)->invalid_segmap);
852#endif
853 }
854 if (__is_large_section(sbi)) {
855 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
856
857 if (!valid_blocks ||
858 valid_blocks == BLKS_PER_SEC(sbi)) {
859 clear_bit(secno, dirty_i->dirty_secmap);
860 return;
861 }
862
863 if (!IS_CURSEC(sbi, secno))
864 set_bit(secno, dirty_i->dirty_secmap);
865 }
866 }
867}
868
/*
 * No error such as -ENOMEM should occur here: adding a dirty entry to the
 * seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
874static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
875{
876 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
877 unsigned short valid_blocks, ckpt_valid_blocks;
878 unsigned int usable_blocks;
879
880 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
881 return;
882
883 usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
884 mutex_lock(&dirty_i->seglist_lock);
885
886 valid_blocks = get_valid_blocks(sbi, segno, false);
887 ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
888
889 if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
890 ckpt_valid_blocks == usable_blocks)) {
891 __locate_dirty_segment(sbi, segno, PRE);
892 __remove_dirty_segment(sbi, segno, DIRTY);
893 } else if (valid_blocks < usable_blocks) {
894 __locate_dirty_segment(sbi, segno, DIRTY);
895 } else {
		/* the recovery routine with SSR needs this */
897 __remove_dirty_segment(sbi, segno, DIRTY);
898 }
899
900 mutex_unlock(&dirty_i->seglist_lock);
901}
902
/* move dirty segments that no longer hold valid blocks to the prefree list */
904void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
905{
906 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
907 unsigned int segno;
908
909 mutex_lock(&dirty_i->seglist_lock);
910 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
911 if (get_valid_blocks(sbi, segno, false))
912 continue;
913 if (IS_CURSEG(sbi, segno))
914 continue;
915 __locate_dirty_segment(sbi, segno, PRE);
916 __remove_dirty_segment(sbi, segno, DIRTY);
917 }
918 mutex_unlock(&dirty_i->seglist_lock);
919}
920
921block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
922{
923 int ovp_hole_segs =
924 (overprovision_segments(sbi) - reserved_segments(sbi));
925 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
926 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
927 block_t holes[2] = {0, 0};
928 block_t unusable;
929 struct seg_entry *se;
930 unsigned int segno;
931
932 mutex_lock(&dirty_i->seglist_lock);
933 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
934 se = get_seg_entry(sbi, segno);
935 if (IS_NODESEG(se->type))
936 holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
937 se->valid_blocks;
938 else
939 holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
940 se->valid_blocks;
941 }
942 mutex_unlock(&dirty_i->seglist_lock);
943
944 unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
945 if (unusable > ovp_holes)
946 return unusable - ovp_holes;
947 return 0;
948}
949
950int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
951{
952 int ovp_hole_segs =
953 (overprovision_segments(sbi) - reserved_segments(sbi));
954 if (unusable > F2FS_OPTION(sbi).unusable_cap)
955 return -EAGAIN;
956 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
957 dirty_segments(sbi) > ovp_hole_segs)
958 return -EAGAIN;
959 return 0;
960}
961
/*
 * Find a dirty segment that holds no valid blocks in either the current or
 * the checkpointed bitmap (used while checkpointing is disabled).
 */
963static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
964{
965 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
966 unsigned int segno = 0;
967
968 mutex_lock(&dirty_i->seglist_lock);
969 for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
970 if (get_valid_blocks(sbi, segno, false))
971 continue;
972 if (get_ckpt_valid_blocks(sbi, segno, false))
973 continue;
974 mutex_unlock(&dirty_i->seglist_lock);
975 return segno;
976 }
977 mutex_unlock(&dirty_i->seglist_lock);
978 return NULL_SEGNO;
979}
980
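/*
 * Allocate and initialize a discard command for [lstart, lstart + len) and
 * queue it on the pending list bucketed by its length.
 */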
981static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
982 struct block_device *bdev, block_t lstart,
983 block_t start, block_t len)
984{
985 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
986 struct list_head *pend_list;
987 struct discard_cmd *dc;
988
989 f2fs_bug_on(sbi, !len);
990
991 pend_list = &dcc->pend_list[plist_idx(len)];
992
993 dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
994 INIT_LIST_HEAD(&dc->list);
995 dc->bdev = bdev;
996 dc->lstart = lstart;
997 dc->start = start;
998 dc->len = len;
999 dc->ref = 0;
1000 dc->state = D_PREP;
1001 dc->queued = 0;
1002 dc->error = 0;
1003 init_completion(&dc->wait);
1004 list_add_tail(&dc->list, pend_list);
1005 spin_lock_init(&dc->lock);
1006 dc->bio_ref = 0;
1007 atomic_inc(&dcc->discard_cmd_cnt);
1008 dcc->undiscard_blks += len;
1009
1010 return dc;
1011}
1012
1013static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
1014 struct block_device *bdev, block_t lstart,
1015 block_t start, block_t len,
1016 struct rb_node *parent, struct rb_node **p,
1017 bool leftmost)
1018{
1019 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1020 struct discard_cmd *dc;
1021
1022 dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
1023
1024 rb_link_node(&dc->rb_node, parent, p);
1025 rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);
1026
1027 return dc;
1028}
1029
1030static void __detach_discard_cmd(struct discard_cmd_control *dcc,
1031 struct discard_cmd *dc)
1032{
1033 if (dc->state == D_DONE)
1034 atomic_sub(dc->queued, &dcc->queued_discard);
1035
1036 list_del(&dc->list);
1037 rb_erase_cached(&dc->rb_node, &dcc->root);
1038 dcc->undiscard_blks -= dc->len;
1039
1040 kmem_cache_free(discard_cmd_slab, dc);
1041
1042 atomic_dec(&dcc->discard_cmd_cnt);
1043}
1044
1045static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
1046 struct discard_cmd *dc)
1047{
1048 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1049 unsigned long flags;
1050
1051 trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
1052
1053 spin_lock_irqsave(&dc->lock, flags);
1054 if (dc->bio_ref) {
1055 spin_unlock_irqrestore(&dc->lock, flags);
1056 return;
1057 }
1058 spin_unlock_irqrestore(&dc->lock, flags);
1059
1060 f2fs_bug_on(sbi, dc->ref);
1061
1062 if (dc->error == -EOPNOTSUPP)
1063 dc->error = 0;
1064
1065 if (dc->error)
1066 printk_ratelimited(
1067 "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
1068 KERN_INFO, sbi->sb->s_id,
1069 dc->lstart, dc->start, dc->len, dc->error);
1070 __detach_discard_cmd(dcc, dc);
1071}
1072
1073static void f2fs_submit_discard_endio(struct bio *bio)
1074{
1075 struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
1076 unsigned long flags;
1077
1078 spin_lock_irqsave(&dc->lock, flags);
1079 if (!dc->error)
1080 dc->error = blk_status_to_errno(bio->bi_status);
1081 dc->bio_ref--;
1082 if (!dc->bio_ref && dc->state == D_SUBMIT) {
1083 dc->state = D_DONE;
1084 complete_all(&dc->wait);
1085 }
1086 spin_unlock_irqrestore(&dc->lock, flags);
1087 bio_put(bio);
1088}
1089
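/*
 * Debug-only (CONFIG_F2FS_CHECK_FS) check: a discard range must not cover
 * any block that is still marked valid in the current SIT bitmap.
 */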
1090static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
1091 block_t start, block_t end)
1092{
1093#ifdef CONFIG_F2FS_CHECK_FS
1094 struct seg_entry *sentry;
1095 unsigned int segno;
1096 block_t blk = start;
1097 unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
1098 unsigned long *map;
1099
1100 while (blk < end) {
1101 segno = GET_SEGNO(sbi, blk);
1102 sentry = get_seg_entry(sbi, segno);
1103 offset = GET_BLKOFF_FROM_SEG0(sbi, blk);
1104
1105 if (end < START_BLOCK(sbi, segno + 1))
1106 size = GET_BLKOFF_FROM_SEG0(sbi, end);
1107 else
1108 size = max_blocks;
1109 map = (unsigned long *)(sentry->cur_valid_map);
1110 offset = __find_rev_next_bit(map, size, offset);
1111 f2fs_bug_on(sbi, offset != size);
1112 blk = START_BLOCK(sbi, segno + 1);
1113 }
1114#endif
1115}
1116
1117static void __init_discard_policy(struct f2fs_sb_info *sbi,
1118 struct discard_policy *dpolicy,
1119 int discard_type, unsigned int granularity)
1120{
1121 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1122
	/* common policy */
1124 dpolicy->type = discard_type;
1125 dpolicy->sync = true;
1126 dpolicy->ordered = false;
1127 dpolicy->granularity = granularity;
1128
1129 dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
1130 dpolicy->io_aware_gran = MAX_PLIST_NUM;
1131 dpolicy->timeout = false;
1132
1133 if (discard_type == DPOLICY_BG) {
1134 dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1135 dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1136 dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1137 dpolicy->io_aware = true;
1138 dpolicy->sync = false;
1139 dpolicy->ordered = true;
1140 if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
1141 dpolicy->granularity = 1;
1142 if (atomic_read(&dcc->discard_cmd_cnt))
1143 dpolicy->max_interval =
1144 DEF_MIN_DISCARD_ISSUE_TIME;
1145 }
1146 } else if (discard_type == DPOLICY_FORCE) {
1147 dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
1148 dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
1149 dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
1150 dpolicy->io_aware = false;
1151 } else if (discard_type == DPOLICY_FSTRIM) {
1152 dpolicy->io_aware = false;
1153 } else if (discard_type == DPOLICY_UMOUNT) {
1154 dpolicy->io_aware = false;
		/* we need to issue all of them to keep CP_TRIMMED_FLAG */
1156 dpolicy->granularity = 1;
1157 dpolicy->timeout = true;
1158 }
1159}
1160
1161static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1162 struct block_device *bdev, block_t lstart,
1163 block_t start, block_t len);
1164
1165static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
1166 struct discard_policy *dpolicy,
1167 struct discard_cmd *dc,
1168 unsigned int *issued)
1169{
1170 struct block_device *bdev = dc->bdev;
1171 struct request_queue *q = bdev_get_queue(bdev);
1172 unsigned int max_discard_blocks =
1173 SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1174 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1175 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1176 &(dcc->fstrim_list) : &(dcc->wait_list);
1177 int flag = dpolicy->sync ? REQ_SYNC : 0;
1178 block_t lstart, start, len, total_len;
1179 int err = 0;
1180
1181 if (dc->state != D_PREP)
1182 return 0;
1183
1184 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
1185 return 0;
1186
1187 trace_f2fs_issue_discard(bdev, dc->start, dc->len);
1188
1189 lstart = dc->lstart;
1190 start = dc->start;
1191 len = dc->len;
1192 total_len = len;
1193
1194 dc->len = 0;
1195
1196 while (total_len && *issued < dpolicy->max_requests && !err) {
1197 struct bio *bio = NULL;
1198 unsigned long flags;
1199 bool last = true;
1200
1201 if (len > max_discard_blocks) {
1202 len = max_discard_blocks;
1203 last = false;
1204 }
1205
1206 (*issued)++;
1207 if (*issued == dpolicy->max_requests)
1208 last = true;
1209
1210 dc->len += len;
1211
1212 if (time_to_inject(sbi, FAULT_DISCARD)) {
1213 f2fs_show_injection_info(sbi, FAULT_DISCARD);
1214 err = -EIO;
1215 goto submit;
1216 }
1217 err = __blkdev_issue_discard(bdev,
1218 SECTOR_FROM_BLOCK(start),
1219 SECTOR_FROM_BLOCK(len),
1220 GFP_NOFS, 0, &bio);
1221submit:
1222 if (err) {
1223 spin_lock_irqsave(&dc->lock, flags);
1224 if (dc->state == D_PARTIAL)
1225 dc->state = D_SUBMIT;
1226 spin_unlock_irqrestore(&dc->lock, flags);
1227
1228 break;
1229 }
1230
1231 f2fs_bug_on(sbi, !bio);
1232
		/*
		 * The state must be set before submission; otherwise the
		 * endio handler could see the bio complete and move the
		 * command to D_DONE right away.
		 */
1237 spin_lock_irqsave(&dc->lock, flags);
1238 if (last)
1239 dc->state = D_SUBMIT;
1240 else
1241 dc->state = D_PARTIAL;
1242 dc->bio_ref++;
1243 spin_unlock_irqrestore(&dc->lock, flags);
1244
1245 atomic_inc(&dcc->queued_discard);
1246 dc->queued++;
1247 list_move_tail(&dc->list, wait_list);
1248
		/* sanity check on the discard range */
1250 __check_sit_bitmap(sbi, lstart, lstart + len);
1251
1252 bio->bi_private = dc;
1253 bio->bi_end_io = f2fs_submit_discard_endio;
1254 bio->bi_opf |= flag;
1255 submit_bio(bio);
1256
1257 atomic_inc(&dcc->issued_discard);
1258
1259 f2fs_update_iostat(sbi, FS_DISCARD, 1);
1260
1261 lstart += len;
1262 start += len;
1263 total_len -= len;
1264 len = total_len;
1265 }
1266
1267 if (!err && len) {
1268 dcc->undiscard_blks -= len;
1269 __update_discard_tree_range(sbi, bdev, lstart, start, len);
1270 }
1271 return err;
1272}
1273
1274static void __insert_discard_tree(struct f2fs_sb_info *sbi,
1275 struct block_device *bdev, block_t lstart,
1276 block_t start, block_t len,
1277 struct rb_node **insert_p,
1278 struct rb_node *insert_parent)
1279{
1280 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1281 struct rb_node **p;
1282 struct rb_node *parent = NULL;
1283 bool leftmost = true;
1284
1285 if (insert_p && insert_parent) {
1286 parent = insert_parent;
1287 p = insert_p;
1288 goto do_insert;
1289 }
1290
1291 p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
1292 lstart, &leftmost);
1293do_insert:
1294 __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
1295 p, leftmost);
1296}
1297
1298static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
1299 struct discard_cmd *dc)
1300{
1301 list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
1302}
1303
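/*
 * A block is being reused before its pending discard was issued: shrink or
 * split the D_PREP command so that @blkaddr is no longer covered by it.
 */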
1304static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
1305 struct discard_cmd *dc, block_t blkaddr)
1306{
1307 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1308 struct discard_info di = dc->di;
1309 bool modified = false;
1310
1311 if (dc->state == D_DONE || dc->len == 1) {
1312 __remove_discard_cmd(sbi, dc);
1313 return;
1314 }
1315
1316 dcc->undiscard_blks -= di.len;
1317
1318 if (blkaddr > di.lstart) {
1319 dc->len = blkaddr - dc->lstart;
1320 dcc->undiscard_blks += dc->len;
1321 __relocate_discard_cmd(dcc, dc);
1322 modified = true;
1323 }
1324
1325 if (blkaddr < di.lstart + di.len - 1) {
1326 if (modified) {
1327 __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
1328 di.start + blkaddr + 1 - di.lstart,
1329 di.lstart + di.len - 1 - blkaddr,
1330 NULL, NULL);
1331 } else {
1332 dc->lstart++;
1333 dc->len--;
1334 dc->start++;
1335 dcc->undiscard_blks += dc->len;
1336 __relocate_discard_cmd(dcc, dc);
1337 }
1338 }
1339}
1340
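/*
 * Add the range [lstart, lstart + len) to the discard rb-tree, merging it
 * with adjacent pending commands on the same device as long as the combined
 * length stays within the device's max_discard_sectors limit.
 */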
1341static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
1342 struct block_device *bdev, block_t lstart,
1343 block_t start, block_t len)
1344{
1345 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1346 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1347 struct discard_cmd *dc;
1348 struct discard_info di = {0};
1349 struct rb_node **insert_p = NULL, *insert_parent = NULL;
1350 struct request_queue *q = bdev_get_queue(bdev);
1351 unsigned int max_discard_blocks =
1352 SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
1353 block_t end = lstart + len;
1354
1355 dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1356 NULL, lstart,
1357 (struct rb_entry **)&prev_dc,
1358 (struct rb_entry **)&next_dc,
1359 &insert_p, &insert_parent, true, NULL);
1360 if (dc)
1361 prev_dc = dc;
1362
1363 if (!prev_dc) {
1364 di.lstart = lstart;
1365 di.len = next_dc ? next_dc->lstart - lstart : len;
1366 di.len = min(di.len, len);
1367 di.start = start;
1368 }
1369
1370 while (1) {
1371 struct rb_node *node;
1372 bool merged = false;
1373 struct discard_cmd *tdc = NULL;
1374
1375 if (prev_dc) {
1376 di.lstart = prev_dc->lstart + prev_dc->len;
1377 if (di.lstart < lstart)
1378 di.lstart = lstart;
1379 if (di.lstart >= end)
1380 break;
1381
1382 if (!next_dc || next_dc->lstart > end)
1383 di.len = end - di.lstart;
1384 else
1385 di.len = next_dc->lstart - di.lstart;
1386 di.start = start + di.lstart - lstart;
1387 }
1388
1389 if (!di.len)
1390 goto next;
1391
1392 if (prev_dc && prev_dc->state == D_PREP &&
1393 prev_dc->bdev == bdev &&
1394 __is_discard_back_mergeable(&di, &prev_dc->di,
1395 max_discard_blocks)) {
1396 prev_dc->di.len += di.len;
1397 dcc->undiscard_blks += di.len;
1398 __relocate_discard_cmd(dcc, prev_dc);
1399 di = prev_dc->di;
1400 tdc = prev_dc;
1401 merged = true;
1402 }
1403
1404 if (next_dc && next_dc->state == D_PREP &&
1405 next_dc->bdev == bdev &&
1406 __is_discard_front_mergeable(&di, &next_dc->di,
1407 max_discard_blocks)) {
1408 next_dc->di.lstart = di.lstart;
1409 next_dc->di.len += di.len;
1410 next_dc->di.start = di.start;
1411 dcc->undiscard_blks += di.len;
1412 __relocate_discard_cmd(dcc, next_dc);
1413 if (tdc)
1414 __remove_discard_cmd(sbi, tdc);
1415 merged = true;
1416 }
1417
1418 if (!merged) {
1419 __insert_discard_tree(sbi, bdev, di.lstart, di.start,
1420 di.len, NULL, NULL);
1421 }
1422 next:
1423 prev_dc = next_dc;
1424 if (!prev_dc)
1425 break;
1426
1427 node = rb_next(&prev_dc->rb_node);
1428 next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1429 }
1430}
1431
1432static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
1433 struct block_device *bdev, block_t blkstart, block_t blklen)
1434{
1435 block_t lblkstart = blkstart;
1436
1437 if (!f2fs_bdev_support_discard(bdev))
1438 return 0;
1439
1440 trace_f2fs_queue_discard(bdev, blkstart, blklen);
1441
1442 if (f2fs_is_multi_device(sbi)) {
1443 int devi = f2fs_target_device_index(sbi, blkstart);
1444
1445 blkstart -= FDEV(devi).start_blk;
1446 }
1447 mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
1448 __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
1449 mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
1450 return 0;
1451}
1452
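/*
 * Issue pending discards in LBA order, resuming from dcc->next_pos, so that
 * ordered (background) trimming sweeps the device sequentially.
 */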
1453static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
1454 struct discard_policy *dpolicy)
1455{
1456 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1457 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
1458 struct rb_node **insert_p = NULL, *insert_parent = NULL;
1459 struct discard_cmd *dc;
1460 struct blk_plug plug;
1461 unsigned int pos = dcc->next_pos;
1462 unsigned int issued = 0;
1463 bool io_interrupted = false;
1464
1465 mutex_lock(&dcc->cmd_lock);
1466 dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
1467 NULL, pos,
1468 (struct rb_entry **)&prev_dc,
1469 (struct rb_entry **)&next_dc,
1470 &insert_p, &insert_parent, true, NULL);
1471 if (!dc)
1472 dc = next_dc;
1473
1474 blk_start_plug(&plug);
1475
1476 while (dc) {
1477 struct rb_node *node;
1478 int err = 0;
1479
1480 if (dc->state != D_PREP)
1481 goto next;
1482
1483 if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
1484 io_interrupted = true;
1485 break;
1486 }
1487
1488 dcc->next_pos = dc->lstart + dc->len;
1489 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1490
1491 if (issued >= dpolicy->max_requests)
1492 break;
1493next:
1494 node = rb_next(&dc->rb_node);
1495 if (err)
1496 __remove_discard_cmd(sbi, dc);
1497 dc = rb_entry_safe(node, struct discard_cmd, rb_node);
1498 }
1499
1500 blk_finish_plug(&plug);
1501
1502 if (!dc)
1503 dcc->next_pos = 0;
1504
1505 mutex_unlock(&dcc->cmd_lock);
1506
1507 if (!issued && io_interrupted)
1508 issued = -1;
1509
1510 return issued;
1511}
1512static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1513 struct discard_policy *dpolicy);
1514
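/*
 * Walk the pending lists from the largest discard granularity down to the
 * policy's minimum, submitting commands until the request budget is spent,
 * I/O-awareness interrupts the walk, or the umount timeout expires.
 */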
1515static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1516 struct discard_policy *dpolicy)
1517{
1518 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1519 struct list_head *pend_list;
1520 struct discard_cmd *dc, *tmp;
1521 struct blk_plug plug;
1522 int i, issued;
1523 bool io_interrupted = false;
1524
1525 if (dpolicy->timeout)
1526 f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
1527
1528retry:
1529 issued = 0;
1530 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1531 if (dpolicy->timeout &&
1532 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1533 break;
1534
1535 if (i + 1 < dpolicy->granularity)
1536 break;
1537
1538 if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
1539 return __issue_discard_cmd_orderly(sbi, dpolicy);
1540
1541 pend_list = &dcc->pend_list[i];
1542
1543 mutex_lock(&dcc->cmd_lock);
1544 if (list_empty(pend_list))
1545 goto next;
1546 if (unlikely(dcc->rbtree_check))
1547 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
1548 &dcc->root, false));
1549 blk_start_plug(&plug);
1550 list_for_each_entry_safe(dc, tmp, pend_list, list) {
1551 f2fs_bug_on(sbi, dc->state != D_PREP);
1552
1553 if (dpolicy->timeout &&
1554 f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
1555 break;
1556
1557 if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1558 !is_idle(sbi, DISCARD_TIME)) {
1559 io_interrupted = true;
1560 break;
1561 }
1562
1563 __submit_discard_cmd(sbi, dpolicy, dc, &issued);
1564
1565 if (issued >= dpolicy->max_requests)
1566 break;
1567 }
1568 blk_finish_plug(&plug);
1569next:
1570 mutex_unlock(&dcc->cmd_lock);
1571
1572 if (issued >= dpolicy->max_requests || io_interrupted)
1573 break;
1574 }
1575
1576 if (dpolicy->type == DPOLICY_UMOUNT && issued) {
1577 __wait_all_discard_cmd(sbi, dpolicy);
1578 goto retry;
1579 }
1580
1581 if (!issued && io_interrupted)
1582 issued = -1;
1583
1584 return issued;
1585}
1586
1587static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
1588{
1589 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1590 struct list_head *pend_list;
1591 struct discard_cmd *dc, *tmp;
1592 int i;
1593 bool dropped = false;
1594
1595 mutex_lock(&dcc->cmd_lock);
1596 for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
1597 pend_list = &dcc->pend_list[i];
1598 list_for_each_entry_safe(dc, tmp, pend_list, list) {
1599 f2fs_bug_on(sbi, dc->state != D_PREP);
1600 __remove_discard_cmd(sbi, dc);
1601 dropped = true;
1602 }
1603 }
1604 mutex_unlock(&dcc->cmd_lock);
1605
1606 return dropped;
1607}
1608
1609void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
1610{
1611 __drop_discard_cmd(sbi);
1612}
1613
1614static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
1615 struct discard_cmd *dc)
1616{
1617 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1618 unsigned int len = 0;
1619
1620 wait_for_completion_io(&dc->wait);
1621 mutex_lock(&dcc->cmd_lock);
1622 f2fs_bug_on(sbi, dc->state != D_DONE);
1623 dc->ref--;
1624 if (!dc->ref) {
1625 if (!dc->error)
1626 len = dc->len;
1627 __remove_discard_cmd(sbi, dc);
1628 }
1629 mutex_unlock(&dcc->cmd_lock);
1630
1631 return len;
1632}
1633
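/*
 * Wait for in-flight discards that overlap [start, end), skipping commands
 * smaller than the policy's granularity; returns the number of blocks that
 * were successfully trimmed.
 */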
1634static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
1635 struct discard_policy *dpolicy,
1636 block_t start, block_t end)
1637{
1638 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1639 struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
1640 &(dcc->fstrim_list) : &(dcc->wait_list);
1641 struct discard_cmd *dc, *tmp;
1642 bool need_wait;
1643 unsigned int trimmed = 0;
1644
1645next:
1646 need_wait = false;
1647
1648 mutex_lock(&dcc->cmd_lock);
1649 list_for_each_entry_safe(dc, tmp, wait_list, list) {
1650 if (dc->lstart + dc->len <= start || end <= dc->lstart)
1651 continue;
1652 if (dc->len < dpolicy->granularity)
1653 continue;
1654 if (dc->state == D_DONE && !dc->ref) {
1655 wait_for_completion_io(&dc->wait);
1656 if (!dc->error)
1657 trimmed += dc->len;
1658 __remove_discard_cmd(sbi, dc);
1659 } else {
1660 dc->ref++;
1661 need_wait = true;
1662 break;
1663 }
1664 }
1665 mutex_unlock(&dcc->cmd_lock);
1666
1667 if (need_wait) {
1668 trimmed += __wait_one_discard_bio(sbi, dc);
1669 goto next;
1670 }
1671
1672 return trimmed;
1673}
1674
1675static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
1676 struct discard_policy *dpolicy)
1677{
1678 struct discard_policy dp;
1679 unsigned int discard_blks;
1680
1681 if (dpolicy)
1682 return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
1683
	/* wait for everything issued by both the fstrim and the umount paths */
1685 __init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
1686 discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1687 __init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
1688 discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
1689
1690 return discard_blks;
1691}
1692
/* This should be covered by the global mutex, &sit_i->sentry_lock */
1694static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
1695{
1696 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1697 struct discard_cmd *dc;
1698 bool need_wait = false;
1699
1700 mutex_lock(&dcc->cmd_lock);
1701 dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
1702 NULL, blkaddr);
1703 if (dc) {
1704 if (dc->state == D_PREP) {
1705 __punch_discard_cmd(sbi, dc, blkaddr);
1706 } else {
1707 dc->ref++;
1708 need_wait = true;
1709 }
1710 }
1711 mutex_unlock(&dcc->cmd_lock);
1712
1713 if (need_wait)
1714 __wait_one_discard_bio(sbi, dc);
1715}
1716
1717void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
1718{
1719 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1720
1721 if (dcc && dcc->f2fs_issue_discard) {
1722 struct task_struct *discard_thread = dcc->f2fs_issue_discard;
1723
1724 dcc->f2fs_issue_discard = NULL;
1725 kthread_stop(discard_thread);
1726 }
1727}
1728
/* issue and drain remaining discards, bounded by the umount timeout */
1730bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
1731{
1732 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1733 struct discard_policy dpolicy;
1734 bool dropped;
1735
1736 __init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
1737 dcc->discard_granularity);
1738 __issue_discard_cmd(sbi, &dpolicy);
1739 dropped = __drop_discard_cmd(sbi);
1740
	/* just to make sure there are no pending discard commands left */
1742 __wait_all_discard_cmd(sbi, NULL);
1743
1744 f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
1745 return dropped;
1746}
1747
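/*
 * Background discard worker: pick a policy (forced when GC is urgent or
 * memory is tight, background otherwise), issue pending discard commands,
 * wait for them, and adapt the sleep interval to how much work was done.
 */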
1748static int issue_discard_thread(void *data)
1749{
1750 struct f2fs_sb_info *sbi = data;
1751 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1752 wait_queue_head_t *q = &dcc->discard_wait_queue;
1753 struct discard_policy dpolicy;
1754 unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
1755 int issued;
1756
1757 set_freezable();
1758
1759 do {
1760 if (sbi->gc_mode == GC_URGENT_HIGH ||
1761 !f2fs_available_free_memory(sbi, DISCARD_CACHE))
1762 __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);
1763 else
1764 __init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
1765 dcc->discard_granularity);
1766
1767 if (!atomic_read(&dcc->discard_cmd_cnt))
1768 wait_ms = dpolicy.max_interval;
1769
1770 wait_event_interruptible_timeout(*q,
1771 kthread_should_stop() || freezing(current) ||
1772 dcc->discard_wake,
1773 msecs_to_jiffies(wait_ms));
1774
1775 if (dcc->discard_wake)
1776 dcc->discard_wake = 0;
1777
		/* clean up pending candidates before going to sleep */
1779 if (atomic_read(&dcc->queued_discard))
1780 __wait_all_discard_cmd(sbi, NULL);
1781
1782 if (try_to_freeze())
1783 continue;
1784 if (f2fs_readonly(sbi->sb))
1785 continue;
1786 if (kthread_should_stop())
1787 return 0;
1788 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1789 wait_ms = dpolicy.max_interval;
1790 continue;
1791 }
1792 if (!atomic_read(&dcc->discard_cmd_cnt))
1793 continue;
1794
1795 sb_start_intwrite(sbi->sb);
1796
1797 issued = __issue_discard_cmd(sbi, &dpolicy);
1798 if (issued > 0) {
1799 __wait_all_discard_cmd(sbi, &dpolicy);
1800 wait_ms = dpolicy.min_interval;
1801 } else if (issued == -1) {
1802 wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
1803 if (!wait_ms)
1804 wait_ms = dpolicy.mid_interval;
1805 } else {
1806 wait_ms = dpolicy.max_interval;
1807 }
1808
1809 sb_end_intwrite(sbi->sb);
1810
1811 } while (!kthread_should_stop());
1812 return 0;
1813}
1814
1815#ifdef CONFIG_BLK_DEV_ZONED
1816static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1817 struct block_device *bdev, block_t blkstart, block_t blklen)
1818{
1819 sector_t sector, nr_sects;
1820 block_t lblkstart = blkstart;
1821 int devi = 0;
1822
1823 if (f2fs_is_multi_device(sbi)) {
1824 devi = f2fs_target_device_index(sbi, blkstart);
1825 if (blkstart < FDEV(devi).start_blk ||
1826 blkstart > FDEV(devi).end_blk) {
1827 f2fs_err(sbi, "Invalid block %x", blkstart);
1828 return -EIO;
1829 }
1830 blkstart -= FDEV(devi).start_blk;
1831 }
1832
	/* a sequential-write-required zone can only be reset as a whole */
1834 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
1835 sector = SECTOR_FROM_BLOCK(blkstart);
1836 nr_sects = SECTOR_FROM_BLOCK(blklen);
1837
1838 if (sector & (bdev_zone_sectors(bdev) - 1) ||
1839 nr_sects != bdev_zone_sectors(bdev)) {
1840 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1841 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1842 blkstart, blklen);
1843 return -EIO;
1844 }
1845 trace_f2fs_issue_reset_zone(bdev, blkstart);
1846 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1847 sector, nr_sects, GFP_NOFS);
1848 }
1849
	/* for conventional zones, fall back to a regular discard if supported */
1851 return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
1852}
1853#endif
1854
1855static int __issue_discard_async(struct f2fs_sb_info *sbi,
1856 struct block_device *bdev, block_t blkstart, block_t blklen)
1857{
1858#ifdef CONFIG_BLK_DEV_ZONED
1859 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
1860 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1861#endif
1862 return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
1863}
1864
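/*
 * Split [blkstart, blkstart + blklen) at device boundaries, mark the blocks
 * in each segment's discard_map, and queue one asynchronous discard per
 * device range.
 */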
1865static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
1866 block_t blkstart, block_t blklen)
1867{
1868 sector_t start = blkstart, len = 0;
1869 struct block_device *bdev;
1870 struct seg_entry *se;
1871 unsigned int offset;
1872 block_t i;
1873 int err = 0;
1874
1875 bdev = f2fs_target_device(sbi, blkstart, NULL);
1876
1877 for (i = blkstart; i < blkstart + blklen; i++, len++) {
1878 if (i != start) {
1879 struct block_device *bdev2 =
1880 f2fs_target_device(sbi, i, NULL);
1881
1882 if (bdev2 != bdev) {
1883 err = __issue_discard_async(sbi, bdev,
1884 start, len);
1885 if (err)
1886 return err;
1887 bdev = bdev2;
1888 start = i;
1889 len = 0;
1890 }
1891 }
1892
1893 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1894 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1895
1896 if (!f2fs_test_and_set_bit(offset, se->discard_map))
1897 sbi->discard_blks--;
1898 }
1899
1900 if (len)
1901 err = __issue_discard_async(sbi, bdev, start, len);
1902 return err;
1903}
1904
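/*
 * Collect discard candidates for the segment at cpc->trim_start: blocks that
 * were valid at the last checkpoint but are no longer valid (or, under
 * CP_DISCARD, every unused block not yet discarded). In check_only mode it
 * just reports whether any candidate exists; otherwise the candidates are
 * recorded as discard_entry bitmaps for f2fs_clear_prefree_segments().
 */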
1905static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1906 bool check_only)
1907{
1908 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1909 int max_blocks = sbi->blocks_per_seg;
1910 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
1911 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1912 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
1913 unsigned long *discard_map = (unsigned long *)se->discard_map;
1914 unsigned long *dmap = SIT_I(sbi)->tmp_map;
1915 unsigned int start = 0, end = -1;
1916 bool force = (cpc->reason & CP_DISCARD);
1917 struct discard_entry *de = NULL;
1918 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
1919 int i;
1920
1921 if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
1922 return false;
1923
1924 if (!force) {
1925 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
1926 SM_I(sbi)->dcc_info->nr_discards >=
1927 SM_I(sbi)->dcc_info->max_discards)
1928 return false;
1929 }
1930
	/* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1932 for (i = 0; i < entries; i++)
1933 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
1934 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
1935
1936 while (force || SM_I(sbi)->dcc_info->nr_discards <=
1937 SM_I(sbi)->dcc_info->max_discards) {
1938 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1939 if (start >= max_blocks)
1940 break;
1941
1942 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
1943 if (force && start && end != max_blocks
1944 && (end - start) < cpc->trim_minlen)
1945 continue;
1946
1947 if (check_only)
1948 return true;
1949
1950 if (!de) {
1951 de = f2fs_kmem_cache_alloc(discard_entry_slab,
1952 GFP_F2FS_ZERO);
1953 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1954 list_add_tail(&de->list, head);
1955 }
1956
1957 for (i = start; i < end; i++)
1958 __set_bit_le(i, (void *)de->discard_map);
1959
1960 SM_I(sbi)->dcc_info->nr_discards += end - start;
1961 }
1962 return false;
1963}
1964
1965static void release_discard_addr(struct discard_entry *entry)
1966{
1967 list_del(&entry->list);
1968 kmem_cache_free(discard_entry_slab, entry);
1969}
1970
1971void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
1972{
1973 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
1974 struct discard_entry *entry, *this;
1975
	/* drop all cached discard entries */
1977 list_for_each_entry_safe(entry, this, head, list)
1978 release_discard_addr(entry);
1979}
1980
/*
 * f2fs_clear_prefree_segments() should be called after the checkpoint is
 * done.
 */
1984static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1985{
1986 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1987 unsigned int segno;
1988
1989 mutex_lock(&dirty_i->seglist_lock);
1990 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
1991 __set_test_and_free(sbi, segno, false);
1992 mutex_unlock(&dirty_i->seglist_lock);
1993}
1994
1995void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
1996 struct cp_control *cpc)
1997{
1998 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1999 struct list_head *head = &dcc->entry_list;
2000 struct discard_entry *entry, *this;
2001 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2002 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
2003 unsigned int start = 0, end = -1;
2004 unsigned int secno, start_segno;
2005 bool force = (cpc->reason & CP_DISCARD);
2006 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
2007
2008 mutex_lock(&dirty_i->seglist_lock);
2009
2010 while (1) {
2011 int i;
2012
2013 if (need_align && end != -1)
2014 end--;
2015 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
2016 if (start >= MAIN_SEGS(sbi))
2017 break;
2018 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
2019 start + 1);
2020
2021 if (need_align) {
2022 start = rounddown(start, sbi->segs_per_sec);
2023 end = roundup(end, sbi->segs_per_sec);
2024 }
2025
2026 for (i = start; i < end; i++) {
2027 if (test_and_clear_bit(i, prefree_map))
2028 dirty_i->nr_dirty[PRE]--;
2029 }
2030
2031 if (!f2fs_realtime_discard_enable(sbi))
2032 continue;
2033
2034 if (force && start >= cpc->trim_start &&
2035 (end - 1) <= cpc->trim_end)
2036 continue;
2037
2038 if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
2039 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
2040 (end - start) << sbi->log_blocks_per_seg);
2041 continue;
2042 }
2043next:
2044 secno = GET_SEC_FROM_SEG(sbi, start);
2045 start_segno = GET_SEG_FROM_SEC(sbi, secno);
2046 if (!IS_CURSEC(sbi, secno) &&
2047 !get_valid_blocks(sbi, start, true))
2048 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
2049 sbi->segs_per_sec << sbi->log_blocks_per_seg);
2050
2051 start = start_segno + sbi->segs_per_sec;
2052 if (start < end)
2053 goto next;
2054 else
2055 end = start - 1;
2056 }
2057 mutex_unlock(&dirty_i->seglist_lock);
2058
	/* send the small discards collected by add_discard_addrs() */
2060 list_for_each_entry_safe(entry, this, head, list) {
2061 unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2062 bool is_valid = test_bit_le(0, entry->discard_map);
2063
2064find_next:
2065 if (is_valid) {
2066 next_pos = find_next_zero_bit_le(entry->discard_map,
2067 sbi->blocks_per_seg, cur_pos);
2068 len = next_pos - cur_pos;
2069
2070 if (f2fs_sb_has_blkzoned(sbi) ||
2071 (force && len < cpc->trim_minlen))
2072 goto skip;
2073
2074 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2075 len);
2076 total_len += len;
2077 } else {
2078 next_pos = find_next_bit_le(entry->discard_map,
2079 sbi->blocks_per_seg, cur_pos);
2080 }
2081skip:
2082 cur_pos = next_pos;
2083 is_valid = !is_valid;
2084
2085 if (cur_pos < sbi->blocks_per_seg)
2086 goto find_next;
2087
2088 release_discard_addr(entry);
2089 dcc->nr_discards -= total_len;
2090 }
2091
2092 wake_up_discard_thread(sbi, false);
2093}
2094
2095static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
2096{
2097 dev_t dev = sbi->sb->s_bdev->bd_dev;
2098 struct discard_cmd_control *dcc;
2099 int err = 0, i;
2100
2101 if (SM_I(sbi)->dcc_info) {
2102 dcc = SM_I(sbi)->dcc_info;
2103 goto init_thread;
2104 }
2105
2106 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
2107 if (!dcc)
2108 return -ENOMEM;
2109
2110 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
2111 INIT_LIST_HEAD(&dcc->entry_list);
2112 for (i = 0; i < MAX_PLIST_NUM; i++)
2113 INIT_LIST_HEAD(&dcc->pend_list[i]);
2114 INIT_LIST_HEAD(&dcc->wait_list);
2115 INIT_LIST_HEAD(&dcc->fstrim_list);
2116 mutex_init(&dcc->cmd_lock);
2117 atomic_set(&dcc->issued_discard, 0);
2118 atomic_set(&dcc->queued_discard, 0);
2119 atomic_set(&dcc->discard_cmd_cnt, 0);
2120 dcc->nr_discards = 0;
2121 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
2122 dcc->undiscard_blks = 0;
2123 dcc->next_pos = 0;
2124 dcc->root = RB_ROOT_CACHED;
2125 dcc->rbtree_check = false;
2126
2127 init_waitqueue_head(&dcc->discard_wait_queue);
2128 SM_I(sbi)->dcc_info = dcc;
2129init_thread:
2130 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2131 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2132 if (IS_ERR(dcc->f2fs_issue_discard)) {
2133 err = PTR_ERR(dcc->f2fs_issue_discard);
2134 kfree(dcc);
2135 SM_I(sbi)->dcc_info = NULL;
2136 return err;
2137 }
2138
2139 return err;
2140}
2141
2142static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
2143{
2144 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2145
2146 if (!dcc)
2147 return;
2148
2149 f2fs_stop_discard_thread(sbi);
2150
	/*
	 * Recovery can cache discard commands, so the error path of
	 * fill_super() needs a chance to handle them before freeing.
	 */
2155 if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2156 f2fs_issue_discard_timeout(sbi);
2157
2158 kfree(dcc);
2159 SM_I(sbi)->dcc_info = NULL;
2160}
2161
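/* mark a SIT entry dirty; returns true if it was already dirty */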
2162static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
2163{
2164 struct sit_info *sit_i = SIT_I(sbi);
2165
2166 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
2167 sit_i->dirty_sentries++;
2168 return false;
2169 }
2170
2171 return true;
2172}
2173
2174static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2175 unsigned int segno, int modified)
2176{
2177 struct seg_entry *se = get_seg_entry(sbi, segno);
2178
2179 se->type = type;
2180 if (modified)
2181 __mark_sit_entry_dirty(sbi, segno);
2182}
2183
2184static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
2185 block_t blkaddr)
2186{
2187 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2188
2189 if (segno == NULL_SEGNO)
2190 return 0;
2191 return get_seg_entry(sbi, segno)->mtime;
2192}
2193
2194static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
2195 unsigned long long old_mtime)
2196{
2197 struct seg_entry *se;
2198 unsigned int segno = GET_SEGNO(sbi, blkaddr);
2199 unsigned long long ctime = get_mtime(sbi, false);
2200 unsigned long long mtime = old_mtime ? old_mtime : ctime;
2201
2202 if (segno == NULL_SEGNO)
2203 return;
2204
2205 se = get_seg_entry(sbi, segno);
2206
2207 if (!se->mtime)
2208 se->mtime = mtime;
2209 else
2210 se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
2211 se->valid_blocks + 1);
2212
2213 if (ctime > SIT_I(sbi)->max_mtime)
2214 SIT_I(sbi)->max_mtime = ctime;
2215}
2216
2217static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2218{
2219 struct seg_entry *se;
2220 unsigned int segno, offset;
2221 long int new_vblocks;
2222 bool exist;
2223#ifdef CONFIG_F2FS_CHECK_FS
2224 bool mir_exist;
2225#endif
2226
2227 segno = GET_SEGNO(sbi, blkaddr);
2228
2229 se = get_seg_entry(sbi, segno);
2230 new_vblocks = se->valid_blocks + del;
2231 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2232
2233 f2fs_bug_on(sbi, (new_vblocks < 0 ||
2234 (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));
2235
2236 se->valid_blocks = new_vblocks;
2237
2238
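	/* Update the valid-block bitmaps for this segment. */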
2239 if (del > 0) {
2240 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
2241#ifdef CONFIG_F2FS_CHECK_FS
2242 mir_exist = f2fs_test_and_set_bit(offset,
2243 se->cur_valid_map_mir);
2244 if (unlikely(exist != mir_exist)) {
2245 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2246 blkaddr, exist);
2247 f2fs_bug_on(sbi, 1);
2248 }
2249#endif
2250 if (unlikely(exist)) {
2251 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2252 blkaddr);
2253 f2fs_bug_on(sbi, 1);
2254 se->valid_blocks--;
2255 del = 0;
2256 }
2257
2258 if (!f2fs_test_and_set_bit(offset, se->discard_map))
2259 sbi->discard_blks--;
2260
		/*
		 * Unless checkpointing is disabled, also account the block as
		 * valid in the checkpoint bitmap.
		 */
2265 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
2266 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2267 se->ckpt_valid_blocks++;
2268 }
2269 } else {
2270 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
2271#ifdef CONFIG_F2FS_CHECK_FS
2272 mir_exist = f2fs_test_and_clear_bit(offset,
2273 se->cur_valid_map_mir);
2274 if (unlikely(exist != mir_exist)) {
2275 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2276 blkaddr, exist);
2277 f2fs_bug_on(sbi, 1);
2278 }
2279#endif
2280 if (unlikely(!exist)) {
2281 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2282 blkaddr);
2283 f2fs_bug_on(sbi, 1);
2284 se->valid_blocks++;
2285 del = 0;
2286 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			/*
			 * With checkpointing disabled, a block that was valid
			 * at the last checkpoint cannot be reused yet, so
			 * count it as unusable.
			 */
2293 if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2294 spin_lock(&sbi->stat_lock);
2295 sbi->unusable_block_count++;
2296 spin_unlock(&sbi->stat_lock);
2297 }
2298 }
2299
2300 if (f2fs_test_and_clear_bit(offset, se->discard_map))
2301 sbi->discard_blks++;
2302 }
2303 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2304 se->ckpt_valid_blocks += del;
2305
2306 __mark_sit_entry_dirty(sbi, segno);
2307
2308
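	/* Keep the global written valid block count in sync. */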
2309 SIT_I(sbi)->written_valid_blocks += del;
2310
2311 if (__is_large_section(sbi))
2312 get_sec_entry(sbi, segno)->valid_blocks += del;
2313}
2314
2315void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
2316{
2317 unsigned int segno = GET_SEGNO(sbi, addr);
2318 struct sit_info *sit_i = SIT_I(sbi);
2319
2320 f2fs_bug_on(sbi, addr == NULL_ADDR);
2321 if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
2322 return;
2323
2324 invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2325 f2fs_invalidate_compress_page(sbi, addr);
2326
2327
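	/* Update the SIT entry and age information for the freed block. */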
2328 down_write(&sit_i->sentry_lock);
2329
2330 update_segment_mtime(sbi, addr, 0);
2331 update_sit_entry(sbi, addr, -1);
2332
2333
2334 locate_dirty_segment(sbi, segno);
2335
2336 up_write(&sit_i->sentry_lock);
2337}
2338
2339bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
2340{
2341 struct sit_info *sit_i = SIT_I(sbi);
2342 unsigned int segno, offset;
2343 struct seg_entry *se;
2344 bool is_cp = false;
2345
2346 if (!__is_valid_data_blkaddr(blkaddr))
2347 return true;
2348
2349 down_read(&sit_i->sentry_lock);
2350
2351 segno = GET_SEGNO(sbi, blkaddr);
2352 se = get_seg_entry(sbi, segno);
2353 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2354
2355 if (f2fs_test_bit(offset, se->ckpt_valid_map))
2356 is_cp = true;
2357
2358 up_read(&sit_i->sentry_lock);
2359
2360 return is_cp;
2361}
2362
/*
 * Caller must hold curseg->curseg_mutex.
 */
2366static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
2367 struct f2fs_summary *sum)
2368{
2369 struct curseg_info *curseg = CURSEG_I(sbi, type);
2370 void *addr = curseg->sum_blk;
2371
2372 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
2373 memcpy(addr, sum, sizeof(struct f2fs_summary));
2374}
2375
/*
 * Calculate the number of meta pages needed to flush the current summaries.
 */
2379int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
2380{
2381 int valid_sum_count = 0;
2382 int i, sum_in_page;
2383
2384 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2385 if (sbi->ckpt->alloc_type[i] == SSR)
2386 valid_sum_count += sbi->blocks_per_seg;
2387 else {
2388 if (for_ra)
2389 valid_sum_count += le16_to_cpu(
2390 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2391 else
2392 valid_sum_count += curseg_blkoff(sbi, i);
2393 }
2394 }
2395
2396 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
2397 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2398 if (valid_sum_count <= sum_in_page)
2399 return 1;
2400 else if ((valid_sum_count - sum_in_page) <=
2401 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
2402 return 2;
2403 return 3;
2404}
2405
/*
 * Caller should put the returned summary page.
 */
2409struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
2410{
2411 if (unlikely(f2fs_cp_error(sbi)))
2412 return ERR_PTR(-EIO);
2413 return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
2414}
2415
2416void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2417 void *src, block_t blk_addr)
2418{
2419 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2420
2421 memcpy(page_address(page), src, PAGE_SIZE);
2422 set_page_dirty(page);
2423 f2fs_put_page(page, 1);
2424}
2425
2426static void write_sum_page(struct f2fs_sb_info *sbi,
2427 struct f2fs_summary_block *sum_blk, block_t blk_addr)
2428{
2429 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
2430}
2431
2432static void write_current_sum_page(struct f2fs_sb_info *sbi,
2433 int type, block_t blk_addr)
2434{
2435 struct curseg_info *curseg = CURSEG_I(sbi, type);
2436 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
2437 struct f2fs_summary_block *src = curseg->sum_blk;
2438 struct f2fs_summary_block *dst;
2439
2440 dst = (struct f2fs_summary_block *)page_address(page);
2441 memset(dst, 0, PAGE_SIZE);
2442
2443 mutex_lock(&curseg->curseg_mutex);
2444
2445 down_read(&curseg->journal_rwsem);
2446 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2447 up_read(&curseg->journal_rwsem);
2448
2449 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2450 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2451
2452 mutex_unlock(&curseg->curseg_mutex);
2453
2454 set_page_dirty(page);
2455 f2fs_put_page(page, 1);
2456}
2457
2458static int is_next_segment_free(struct f2fs_sb_info *sbi,
2459 struct curseg_info *curseg, int type)
2460{
2461 unsigned int segno = curseg->segno + 1;
2462 struct free_segmap_info *free_i = FREE_I(sbi);
2463
2464 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2465 return !test_bit(segno, free_i->free_segmap);
2466 return 0;
2467}
2468
/*
 * Find a new segment from the free segmap, trying to stay close to the
 * given hint; the allocation is expected to succeed.
 */
2473static void get_new_segment(struct f2fs_sb_info *sbi,
2474 unsigned int *newseg, bool new_sec, int dir)
2475{
2476 struct free_segmap_info *free_i = FREE_I(sbi);
2477 unsigned int segno, secno, zoneno;
2478 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
2479 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2480 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
2481 unsigned int left_start = hint;
2482 bool init = true;
2483 int go_left = 0;
2484 int i;
2485
2486 spin_lock(&free_i->segmap_lock);
2487
2488 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2489 segno = find_next_zero_bit(free_i->free_segmap,
2490 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2491 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
2492 goto got_it;
2493 }
2494find_other_zone:
2495 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2496 if (secno >= MAIN_SECS(sbi)) {
2497 if (dir == ALLOC_RIGHT) {
2498 secno = find_next_zero_bit(free_i->free_secmap,
2499 MAIN_SECS(sbi), 0);
2500 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
2501 } else {
2502 go_left = 1;
2503 left_start = hint - 1;
2504 }
2505 }
2506 if (go_left == 0)
2507 goto skip_left;
2508
2509 while (test_bit(left_start, free_i->free_secmap)) {
2510 if (left_start > 0) {
2511 left_start--;
2512 continue;
2513 }
2514 left_start = find_next_zero_bit(free_i->free_secmap,
2515 MAIN_SECS(sbi), 0);
2516 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
2517 break;
2518 }
2519 secno = left_start;
2520skip_left:
2521 segno = GET_SEG_FROM_SEC(sbi, secno);
2522 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
2523
2524
2525 if (!init)
2526 goto got_it;
2527 if (sbi->secs_per_zone == 1)
2528 goto got_it;
2529 if (zoneno == old_zoneno)
2530 goto got_it;
2531 if (dir == ALLOC_LEFT) {
2532 if (!go_left && zoneno + 1 >= total_zones)
2533 goto got_it;
2534 if (go_left && zoneno == 0)
2535 goto got_it;
2536 }
2537 for (i = 0; i < NR_CURSEG_TYPE; i++)
2538 if (CURSEG_I(sbi, i)->zone == zoneno)
2539 break;
2540
2541 if (i < NR_CURSEG_TYPE) {
2542
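		/* The zone is used by another curseg; pick a hint in the next zone and retry. */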
2543 if (go_left)
2544 hint = zoneno * sbi->secs_per_zone - 1;
2545 else if (zoneno + 1 >= total_zones)
2546 hint = 0;
2547 else
2548 hint = (zoneno + 1) * sbi->secs_per_zone;
2549 init = false;
2550 goto find_other_zone;
2551 }
2552got_it:
2553
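	/* The chosen segment must still be free; mark it as in use. */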
2554 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
2555 __set_inuse(sbi, segno);
2556 *newseg = segno;
2557 spin_unlock(&free_i->segmap_lock);
2558}
2559
2560static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2561{
2562 struct curseg_info *curseg = CURSEG_I(sbi, type);
2563 struct summary_footer *sum_footer;
2564 unsigned short seg_type = curseg->seg_type;
2565
2566 curseg->inited = true;
2567 curseg->segno = curseg->next_segno;
2568 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
2569 curseg->next_blkoff = 0;
2570 curseg->next_segno = NULL_SEGNO;
2571
2572 sum_footer = &(curseg->sum_blk->footer);
2573 memset(sum_footer, 0, sizeof(struct summary_footer));
2574
2575 sanity_check_seg_type(sbi, seg_type);
2576
2577 if (IS_DATASEG(seg_type))
2578 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2579 if (IS_NODESEG(seg_type))
2580 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2581 __set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
2582}
2583
2584static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2585{
2586 struct curseg_info *curseg = CURSEG_I(sbi, type);
2587 unsigned short seg_type = curseg->seg_type;
2588
2589 sanity_check_seg_type(sbi, seg_type);
2590
2591
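	/* With large sections, keep the allocation hint inside the current section. */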
2592 if (__is_large_section(sbi))
2593 return curseg->segno;
2594
2595
2596 if (!curseg->inited)
2597 return 0;
2598
2599 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2600 return 0;
2601
2602 if (test_opt(sbi, NOHEAP) &&
2603 (seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
2604 return 0;
2605
2606 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2607 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
2608
2609
2610 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2611 return 0;
2612
2613 return curseg->segno;
2614}
2615
/*
 * Allocate a new current segment; this always takes a free segment
 * (LFS-style allocation).
 */
2620static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2621{
2622 struct curseg_info *curseg = CURSEG_I(sbi, type);
2623 unsigned short seg_type = curseg->seg_type;
2624 unsigned int segno = curseg->segno;
2625 int dir = ALLOC_LEFT;
2626
2627 if (curseg->inited)
2628 write_sum_page(sbi, curseg->sum_blk,
2629 GET_SUM_BLOCK(sbi, segno));
2630 if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
2631 dir = ALLOC_RIGHT;
2632
2633 if (test_opt(sbi, NOHEAP))
2634 dir = ALLOC_RIGHT;
2635
2636 segno = __get_next_segno(sbi, type);
2637 get_new_segment(sbi, &segno, new_sec, dir);
2638 curseg->next_segno = segno;
2639 reset_curseg(sbi, type, 1);
2640 curseg->alloc_type = LFS;
2641}
2642
2643static int __next_free_blkoff(struct f2fs_sb_info *sbi,
2644 int segno, block_t start)
2645{
2646 struct seg_entry *se = get_seg_entry(sbi, segno);
2647 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
2648 unsigned long *target_map = SIT_I(sbi)->tmp_map;
2649 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2650 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2651 int i;
2652
2653 for (i = 0; i < entries; i++)
2654 target_map[i] = ckpt_map[i] | cur_map[i];
2655
2656 return __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2657}
2658
/*
 * For an LFS segment the next block offset simply increments; for an SSR
 * segment it is the next free slot in the merged valid-block bitmaps.
 */
2664static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2665 struct curseg_info *seg)
2666{
2667 if (seg->alloc_type == SSR)
2668 seg->next_blkoff =
2669 __next_free_blkoff(sbi, seg->segno,
2670 seg->next_blkoff + 1);
2671 else
2672 seg->next_blkoff++;
2673}
2674
2675bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
2676{
2677 return __next_free_blkoff(sbi, segno, 0) < sbi->blocks_per_seg;
2678}
2679
/*
 * Switch the current segment to a used segment chosen by SSR and restore
 * its existing summary information.
 */
2684static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
2685{
2686 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2687 struct curseg_info *curseg = CURSEG_I(sbi, type);
2688 unsigned int new_segno = curseg->next_segno;
2689 struct f2fs_summary_block *sum_node;
2690 struct page *sum_page;
2691
2692 if (flush)
2693 write_sum_page(sbi, curseg->sum_blk,
2694 GET_SUM_BLOCK(sbi, curseg->segno));
2695
2696 __set_test_and_inuse(sbi, new_segno);
2697
2698 mutex_lock(&dirty_i->seglist_lock);
2699 __remove_dirty_segment(sbi, new_segno, PRE);
2700 __remove_dirty_segment(sbi, new_segno, DIRTY);
2701 mutex_unlock(&dirty_i->seglist_lock);
2702
2703 reset_curseg(sbi, type, 1);
2704 curseg->alloc_type = SSR;
2705 curseg->next_blkoff = __next_free_blkoff(sbi, curseg->segno, 0);
2706
2707 sum_page = f2fs_get_sum_page(sbi, new_segno);
2708 if (IS_ERR(sum_page)) {
2709
2710 memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
2711 return;
2712 }
2713 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2714 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2715 f2fs_put_page(sum_page, 1);
2716}
2717
2718static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2719 int alloc_mode, unsigned long long age);
2720
2721static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
2722 int target_type, int alloc_mode,
2723 unsigned long long age)
2724{
2725 struct curseg_info *curseg = CURSEG_I(sbi, type);
2726
2727 curseg->seg_type = target_type;
2728
2729 if (get_ssr_segment(sbi, type, alloc_mode, age)) {
2730 struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);
2731
2732 curseg->seg_type = se->type;
2733 change_curseg(sbi, type, true);
2734 } else {
2735
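		/* No SSR candidate; allocate a fresh segment as cold data instead. */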
2736 curseg->seg_type = CURSEG_COLD_DATA;
2737 new_curseg(sbi, type, true);
2738 }
2739 stat_inc_seg_type(sbi, curseg);
2740}
2741
2742static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
2743{
2744 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);
2745
2746 if (!sbi->am.atgc_enabled)
2747 return;
2748
2749 down_read(&SM_I(sbi)->curseg_lock);
2750
2751 mutex_lock(&curseg->curseg_mutex);
2752 down_write(&SIT_I(sbi)->sentry_lock);
2753
2754 get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);
2755
2756 up_write(&SIT_I(sbi)->sentry_lock);
2757 mutex_unlock(&curseg->curseg_mutex);
2758
2759 up_read(&SM_I(sbi)->curseg_lock);
2760
2761}
2762void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
2763{
2764 __f2fs_init_atgc_curseg(sbi);
2765}
2766
2767static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2768{
2769 struct curseg_info *curseg = CURSEG_I(sbi, type);
2770
2771 mutex_lock(&curseg->curseg_mutex);
2772 if (!curseg->inited)
2773 goto out;
2774
2775 if (get_valid_blocks(sbi, curseg->segno, false)) {
2776 write_sum_page(sbi, curseg->sum_blk,
2777 GET_SUM_BLOCK(sbi, curseg->segno));
2778 } else {
2779 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2780 __set_test_and_free(sbi, curseg->segno, true);
2781 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2782 }
2783out:
2784 mutex_unlock(&curseg->curseg_mutex);
2785}
2786
2787void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
2788{
2789 __f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2790
2791 if (sbi->am.atgc_enabled)
2792 __f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2793}
2794
2795static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
2796{
2797 struct curseg_info *curseg = CURSEG_I(sbi, type);
2798
2799 mutex_lock(&curseg->curseg_mutex);
2800 if (!curseg->inited)
2801 goto out;
2802 if (get_valid_blocks(sbi, curseg->segno, false))
2803 goto out;
2804
2805 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2806 __set_test_and_inuse(sbi, curseg->segno);
2807 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2808out:
2809 mutex_unlock(&curseg->curseg_mutex);
2810}
2811
2812void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
2813{
2814 __f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);
2815
2816 if (sbi->am.atgc_enabled)
2817 __f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
2818}
2819
2820static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
2821 int alloc_mode, unsigned long long age)
2822{
2823 struct curseg_info *curseg = CURSEG_I(sbi, type);
2824 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
2825 unsigned segno = NULL_SEGNO;
2826 unsigned short seg_type = curseg->seg_type;
2827 int i, cnt;
2828 bool reversed = false;
2829
2830 sanity_check_seg_type(sbi, seg_type);
2831
2832
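	/* First try to pick an SSR victim of the requested type. */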
2833 if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
2834 curseg->next_segno = segno;
2835 return 1;
2836 }
2837
2838
2839 if (IS_NODESEG(seg_type)) {
2840 if (seg_type >= CURSEG_WARM_NODE) {
2841 reversed = true;
2842 i = CURSEG_COLD_NODE;
2843 } else {
2844 i = CURSEG_HOT_NODE;
2845 }
2846 cnt = NR_CURSEG_NODE_TYPE;
2847 } else {
2848 if (seg_type >= CURSEG_WARM_DATA) {
2849 reversed = true;
2850 i = CURSEG_COLD_DATA;
2851 } else {
2852 i = CURSEG_HOT_DATA;
2853 }
2854 cnt = NR_CURSEG_DATA_TYPE;
2855 }
2856
2857 for (; cnt-- > 0; reversed ? i-- : i++) {
2858 if (i == seg_type)
2859 continue;
2860 if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
2861 curseg->next_segno = segno;
2862 return 1;
2863 }
2864 }
2865
2866
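	/* With checkpointing disabled, fall back to picking a free segment directly. */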
2867 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2868 segno = get_free_segment(sbi);
2869 if (segno != NULL_SEGNO) {
2870 curseg->next_segno = segno;
2871 return 1;
2872 }
2873 }
2874 return 0;
2875}
2876
/*
 * Flush out the current segment and replace it with a newly allocated one.
 */
2881static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2882 int type, bool force)
2883{
2884 struct curseg_info *curseg = CURSEG_I(sbi, type);
2885
2886 if (force)
2887 new_curseg(sbi, type, true);
2888 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2889 curseg->seg_type == CURSEG_WARM_NODE)
2890 new_curseg(sbi, type, false);
2891 else if (curseg->alloc_type == LFS &&
2892 is_next_segment_free(sbi, curseg, type) &&
2893 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2894 new_curseg(sbi, type, false);
2895 else if (f2fs_need_SSR(sbi) &&
2896 get_ssr_segment(sbi, type, SSR, 0))
2897 change_curseg(sbi, type, true);
2898 else
2899 new_curseg(sbi, type, false);
2900
2901 stat_inc_seg_type(sbi, curseg);
2902}
2903
2904void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2905 unsigned int start, unsigned int end)
2906{
2907 struct curseg_info *curseg = CURSEG_I(sbi, type);
2908 unsigned int segno;
2909
2910 down_read(&SM_I(sbi)->curseg_lock);
2911 mutex_lock(&curseg->curseg_mutex);
2912 down_write(&SIT_I(sbi)->sentry_lock);
2913
2914 segno = CURSEG_I(sbi, type)->segno;
2915 if (segno < start || segno > end)
2916 goto unlock;
2917
2918 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
2919 change_curseg(sbi, type, true);
2920 else
2921 new_curseg(sbi, type, true);
2922
2923 stat_inc_seg_type(sbi, curseg);
2924
2925 locate_dirty_segment(sbi, segno);
2926unlock:
2927 up_write(&SIT_I(sbi)->sentry_lock);
2928
2929 if (segno != curseg->segno)
2930 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2931 type, segno, curseg->segno);
2932
2933 mutex_unlock(&curseg->curseg_mutex);
2934 up_read(&SM_I(sbi)->curseg_lock);
2935}
2936
2937static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
2938 bool new_sec, bool force)
2939{
2940 struct curseg_info *curseg = CURSEG_I(sbi, type);
2941 unsigned int old_segno;
2942
2943 if (!curseg->inited)
2944 goto alloc;
2945
2946 if (force || curseg->next_blkoff ||
2947 get_valid_blocks(sbi, curseg->segno, new_sec))
2948 goto alloc;
2949
2950 if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
2951 return;
2952alloc:
2953 old_segno = curseg->segno;
2954 SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
2955 locate_dirty_segment(sbi, old_segno);
2956}
2957
2958static void __allocate_new_section(struct f2fs_sb_info *sbi,
2959 int type, bool force)
2960{
2961 __allocate_new_segment(sbi, type, true, force);
2962}
2963
2964void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
2965{
2966 down_read(&SM_I(sbi)->curseg_lock);
2967 down_write(&SIT_I(sbi)->sentry_lock);
2968 __allocate_new_section(sbi, type, force);
2969 up_write(&SIT_I(sbi)->sentry_lock);
2970 up_read(&SM_I(sbi)->curseg_lock);
2971}
2972
2973void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
2974{
2975 int i;
2976
2977 down_read(&SM_I(sbi)->curseg_lock);
2978 down_write(&SIT_I(sbi)->sentry_lock);
2979 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
2980 __allocate_new_segment(sbi, i, false, false);
2981 up_write(&SIT_I(sbi)->sentry_lock);
2982 up_read(&SM_I(sbi)->curseg_lock);
2983}
2984
2985static const struct segment_allocation default_salloc_ops = {
2986 .allocate_segment = allocate_segment_by_default,
2987};
2988
2989bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
2990 struct cp_control *cpc)
2991{
2992 __u64 trim_start = cpc->trim_start;
2993 bool has_candidate = false;
2994
2995 down_write(&SIT_I(sbi)->sentry_lock);
2996 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2997 if (add_discard_addrs(sbi, cpc, true)) {
2998 has_candidate = true;
2999 break;
3000 }
3001 }
3002 up_write(&SIT_I(sbi)->sentry_lock);
3003
3004 cpc->trim_start = trim_start;
3005 return has_candidate;
3006}
3007
3008static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
3009 struct discard_policy *dpolicy,
3010 unsigned int start, unsigned int end)
3011{
3012 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
3013 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
3014 struct rb_node **insert_p = NULL, *insert_parent = NULL;
3015 struct discard_cmd *dc;
3016 struct blk_plug plug;
3017 int issued;
3018 unsigned int trimmed = 0;
3019
3020next:
3021 issued = 0;
3022
3023 mutex_lock(&dcc->cmd_lock);
3024 if (unlikely(dcc->rbtree_check))
3025 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
3026 &dcc->root, false));
3027
3028 dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
3029 NULL, start,
3030 (struct rb_entry **)&prev_dc,
3031 (struct rb_entry **)&next_dc,
3032 &insert_p, &insert_parent, true, NULL);
3033 if (!dc)
3034 dc = next_dc;
3035
3036 blk_start_plug(&plug);
3037
3038 while (dc && dc->lstart <= end) {
3039 struct rb_node *node;
3040 int err = 0;
3041
3042 if (dc->len < dpolicy->granularity)
3043 goto skip;
3044
3045 if (dc->state != D_PREP) {
3046 list_move_tail(&dc->list, &dcc->fstrim_list);
3047 goto skip;
3048 }
3049
3050 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
3051
3052 if (issued >= dpolicy->max_requests) {
3053 start = dc->lstart + dc->len;
3054
3055 if (err)
3056 __remove_discard_cmd(sbi, dc);
3057
3058 blk_finish_plug(&plug);
3059 mutex_unlock(&dcc->cmd_lock);
3060 trimmed += __wait_all_discard_cmd(sbi, NULL);
3061 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
3062 goto next;
3063 }
3064skip:
3065 node = rb_next(&dc->rb_node);
3066 if (err)
3067 __remove_discard_cmd(sbi, dc);
3068 dc = rb_entry_safe(node, struct discard_cmd, rb_node);
3069
3070 if (fatal_signal_pending(current))
3071 break;
3072 }
3073
3074 blk_finish_plug(&plug);
3075 mutex_unlock(&dcc->cmd_lock);
3076
3077 return trimmed;
3078}
3079
3080int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
3081{
3082 __u64 start = F2FS_BYTES_TO_BLK(range->start);
3083 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
3084 unsigned int start_segno, end_segno;
3085 block_t start_block, end_block;
3086 struct cp_control cpc;
3087 struct discard_policy dpolicy;
3088 unsigned long long trimmed = 0;
3089 int err = 0;
3090 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
3091
3092 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
3093 return -EINVAL;
3094
3095 if (end < MAIN_BLKADDR(sbi))
3096 goto out;
3097
3098 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
3099 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
3100 return -EFSCORRUPTED;
3101 }
3102
3103
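	/* Convert the trim range into start/end segment numbers within the main area. */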
3104 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
3105 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
3106 GET_SEGNO(sbi, end);
3107 if (need_align) {
3108 start_segno = rounddown(start_segno, sbi->segs_per_sec);
3109 end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
3110 }
3111
3112 cpc.reason = CP_DISCARD;
3113 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
3114 cpc.trim_start = start_segno;
3115 cpc.trim_end = end_segno;
3116
3117 if (sbi->discard_blks == 0)
3118 goto out;
3119
3120 down_write(&sbi->gc_lock);
3121 err = f2fs_write_checkpoint(sbi, &cpc);
3122 up_write(&sbi->gc_lock);
3123 if (err)
3124 goto out;
3125
	/*
	 * When runtime discard is enabled, the trim candidates collected by
	 * the checkpoint above will be issued during idle time, so there is
	 * no need to issue and wait for them here.
	 */
3132 if (f2fs_realtime_discard_enable(sbi))
3133 goto out;
3134
3135 start_block = START_BLOCK(sbi, start_segno);
3136 end_block = START_BLOCK(sbi, end_segno + 1);
3137
3138 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
3139 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
3140 start_block, end_block);
3141
3142 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
3143 start_block, end_block);
3144out:
3145 if (!err)
3146 range->len = F2FS_BLK_TO_BYTES(trimmed);
3147 return err;
3148}
3149
3150static bool __has_curseg_space(struct f2fs_sb_info *sbi,
3151 struct curseg_info *curseg)
3152{
3153 return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
3154 curseg->segno);
3155}
3156
3157int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
3158{
3159 switch (hint) {
3160 case WRITE_LIFE_SHORT:
3161 return CURSEG_HOT_DATA;
3162 case WRITE_LIFE_EXTREME:
3163 return CURSEG_COLD_DATA;
3164 default:
3165 return CURSEG_WARM_DATA;
3166 }
3167}
3168
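/*
 * Map a (page type, temperature) pair to a block-layer write hint,
 * according to the whint_mode mount option.
 */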
3230enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3231 enum page_type type, enum temp_type temp)
3232{
3233 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
3234 if (type == DATA) {
3235 if (temp == WARM)
3236 return WRITE_LIFE_NOT_SET;
3237 else if (temp == HOT)
3238 return WRITE_LIFE_SHORT;
3239 else if (temp == COLD)
3240 return WRITE_LIFE_EXTREME;
3241 } else {
3242 return WRITE_LIFE_NOT_SET;
3243 }
3244 } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
3245 if (type == DATA) {
3246 if (temp == WARM)
3247 return WRITE_LIFE_LONG;
3248 else if (temp == HOT)
3249 return WRITE_LIFE_SHORT;
3250 else if (temp == COLD)
3251 return WRITE_LIFE_EXTREME;
3252 } else if (type == NODE) {
3253 if (temp == WARM || temp == HOT)
3254 return WRITE_LIFE_NOT_SET;
3255 else if (temp == COLD)
3256 return WRITE_LIFE_NONE;
3257 } else if (type == META) {
3258 return WRITE_LIFE_MEDIUM;
3259 }
3260 }
3261 return WRITE_LIFE_NOT_SET;
3262}
3263
3264static int __get_segment_type_2(struct f2fs_io_info *fio)
3265{
3266 if (fio->type == DATA)
3267 return CURSEG_HOT_DATA;
3268 else
3269 return CURSEG_HOT_NODE;
3270}
3271
3272static int __get_segment_type_4(struct f2fs_io_info *fio)
3273{
3274 if (fio->type == DATA) {
3275 struct inode *inode = fio->page->mapping->host;
3276
3277 if (S_ISDIR(inode->i_mode))
3278 return CURSEG_HOT_DATA;
3279 else
3280 return CURSEG_COLD_DATA;
3281 } else {
3282 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
3283 return CURSEG_WARM_NODE;
3284 else
3285 return CURSEG_COLD_NODE;
3286 }
3287}
3288
3289static int __get_segment_type_6(struct f2fs_io_info *fio)
3290{
3291 if (fio->type == DATA) {
3292 struct inode *inode = fio->page->mapping->host;
3293
3294 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
3295 return CURSEG_COLD_DATA_PINNED;
3296
3297 if (page_private_gcing(fio->page)) {
3298 if (fio->sbi->am.atgc_enabled &&
3299 (fio->io_type == FS_DATA_IO) &&
3300 (fio->sbi->gc_mode != GC_URGENT_HIGH))
3301 return CURSEG_ALL_DATA_ATGC;
3302 else
3303 return CURSEG_COLD_DATA;
3304 }
3305 if (file_is_cold(inode) || f2fs_need_compress_data(inode))
3306 return CURSEG_COLD_DATA;
3307 if (file_is_hot(inode) ||
3308 is_inode_flag_set(inode, FI_HOT_DATA) ||
3309 f2fs_is_atomic_file(inode) ||
3310 f2fs_is_volatile_file(inode))
3311 return CURSEG_HOT_DATA;
3312 return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
3313 } else {
3314 if (IS_DNODE(fio->page))
3315 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
3316 CURSEG_HOT_NODE;
3317 return CURSEG_COLD_NODE;
3318 }
3319}
3320
3321static int __get_segment_type(struct f2fs_io_info *fio)
3322{
3323 int type = 0;
3324
3325 switch (F2FS_OPTION(fio->sbi).active_logs) {
3326 case 2:
3327 type = __get_segment_type_2(fio);
3328 break;
3329 case 4:
3330 type = __get_segment_type_4(fio);
3331 break;
3332 case 6:
3333 type = __get_segment_type_6(fio);
3334 break;
3335 default:
3336 f2fs_bug_on(fio->sbi, true);
3337 }
3338
3339 if (IS_HOT(type))
3340 fio->temp = HOT;
3341 else if (IS_WARM(type))
3342 fio->temp = WARM;
3343 else
3344 fio->temp = COLD;
3345 return type;
3346}
3347
3348void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3349 block_t old_blkaddr, block_t *new_blkaddr,
3350 struct f2fs_summary *sum, int type,
3351 struct f2fs_io_info *fio)
3352{
3353 struct sit_info *sit_i = SIT_I(sbi);
3354 struct curseg_info *curseg = CURSEG_I(sbi, type);
3355 unsigned long long old_mtime;
3356 bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
3357 struct seg_entry *se = NULL;
3358
3359 down_read(&SM_I(sbi)->curseg_lock);
3360
3361 mutex_lock(&curseg->curseg_mutex);
3362 down_write(&sit_i->sentry_lock);
3363
3364 if (from_gc) {
3365 f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
3366 se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
3367 sanity_check_seg_type(sbi, se->type);
3368 f2fs_bug_on(sbi, IS_NODESEG(se->type));
3369 }
3370 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3371
3372 f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);
3373
3374 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3375
	/*
	 * __add_sum_entry updates the current summary block, so it must be
	 * called with curseg_mutex held (taken above).
	 */
3381 __add_sum_entry(sbi, type, sum);
3382
3383 __refresh_next_blkoff(sbi, curseg);
3384
3385 stat_inc_block_count(sbi, curseg);
3386
3387 if (from_gc) {
3388 old_mtime = get_segment_mtime(sbi, old_blkaddr);
3389 } else {
3390 update_segment_mtime(sbi, old_blkaddr, 0);
3391 old_mtime = 0;
3392 }
3393 update_segment_mtime(sbi, *new_blkaddr, old_mtime);
3394
	/*
	 * SIT information must be updated before segment allocation, since
	 * SSR needs the latest valid-block information.
	 */
3399 update_sit_entry(sbi, *new_blkaddr, 1);
3400 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3401 update_sit_entry(sbi, old_blkaddr, -1);
3402
3403 if (!__has_curseg_space(sbi, curseg)) {
3404 if (from_gc)
3405 get_atssr_segment(sbi, type, se->type,
3406 AT_SSR, se->mtime);
3407 else
3408 sit_i->s_ops->allocate_segment(sbi, type, false);
3409 }
3410
	/*
	 * Segment dirty status is updated only once here, after the previous
	 * segment has been closed.
	 */
3415 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3416 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
3417
3418 up_write(&sit_i->sentry_lock);
3419
3420 if (page && IS_NODESEG(type)) {
3421 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3422
3423 f2fs_inode_chksum_set(sbi, page);
3424 }
3425
3426 if (fio) {
3427 struct f2fs_bio_info *io;
3428
3429 if (F2FS_IO_ALIGNED(sbi))
3430 fio->retry = false;
3431
3432 INIT_LIST_HEAD(&fio->list);
3433 fio->in_list = true;
3434 io = sbi->write_io[fio->type] + fio->temp;
3435 spin_lock(&io->io_lock);
3436 list_add_tail(&fio->list, &io->io_list);
3437 spin_unlock(&io->io_lock);
3438 }
3439
3440 mutex_unlock(&curseg->curseg_mutex);
3441
3442 up_read(&SM_I(sbi)->curseg_lock);
3443}
3444
3445static void update_device_state(struct f2fs_io_info *fio)
3446{
3447 struct f2fs_sb_info *sbi = fio->sbi;
3448 unsigned int devidx;
3449
3450 if (!f2fs_is_multi_device(sbi))
3451 return;
3452
3453 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3454
3455
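	/* Record the inode as dirty on this device so fsync can flush it there. */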
3456 f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
3457
3458
3459 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3460 spin_lock(&sbi->dev_lock);
3461 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3462 spin_unlock(&sbi->dev_lock);
3463 }
3464}
3465
3466static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
3467{
3468 int type = __get_segment_type(fio);
3469 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
3470
3471 if (keep_order)
3472 down_read(&fio->sbi->io_order_lock);
3473reallocate:
3474 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
3475 &fio->new_blkaddr, sum, type, fio);
3476 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
3477 invalidate_mapping_pages(META_MAPPING(fio->sbi),
3478 fio->old_blkaddr, fio->old_blkaddr);
3479 f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
3480 }
3481
3482
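	/* Write out the page to the newly allocated block. */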
3483 f2fs_submit_page_write(fio);
3484 if (fio->retry) {
3485 fio->old_blkaddr = fio->new_blkaddr;
3486 goto reallocate;
3487 }
3488
3489 update_device_state(fio);
3490
3491 if (keep_order)
3492 up_read(&fio->sbi->io_order_lock);
3493}
3494
3495void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3496 enum iostat_type io_type)
3497{
3498 struct f2fs_io_info fio = {
3499 .sbi = sbi,
3500 .type = META,
3501 .temp = HOT,
3502 .op = REQ_OP_WRITE,
3503 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
3504 .old_blkaddr = page->index,
3505 .new_blkaddr = page->index,
3506 .page = page,
3507 .encrypted_page = NULL,
3508 .in_list = false,
3509 };
3510
3511 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
3512 fio.op_flags &= ~REQ_META;
3513
3514 set_page_writeback(page);
3515 ClearPageError(page);
3516 f2fs_submit_page_write(&fio);
3517
3518 stat_inc_meta_count(sbi, page->index);
3519 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
3520}
3521
3522void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
3523{
3524 struct f2fs_summary sum;
3525
3526 set_summary(&sum, nid, 0, 0);
3527 do_write_page(&sum, fio);
3528
3529 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3530}
3531
3532void f2fs_outplace_write_data(struct dnode_of_data *dn,
3533 struct f2fs_io_info *fio)
3534{
3535 struct f2fs_sb_info *sbi = fio->sbi;
3536 struct f2fs_summary sum;
3537
3538 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
3539 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
3540 do_write_page(&sum, fio);
3541 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
3542
3543 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
3544}
3545
3546int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3547{
3548 int err;
3549 struct f2fs_sb_info *sbi = fio->sbi;
3550 unsigned int segno;
3551
3552 fio->new_blkaddr = fio->old_blkaddr;
3553
3554 __get_segment_type(fio);
3555
3556 segno = GET_SEGNO(sbi, fio->new_blkaddr);
3557
3558 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3559 set_sbi_flag(sbi, SBI_NEED_FSCK);
3560 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3561 __func__, segno);
3562 err = -EFSCORRUPTED;
3563 goto drop_bio;
3564 }
3565
3566 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) || f2fs_cp_error(sbi)) {
3567 err = -EIO;
3568 goto drop_bio;
3569 }
3570
3571 stat_inc_inplace_blocks(fio->sbi);
3572
3573 if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
3574 err = f2fs_merge_page_bio(fio);
3575 else
3576 err = f2fs_submit_page_bio(fio);
3577 if (!err) {
3578 update_device_state(fio);
3579 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3580 }
3581
3582 return err;
3583drop_bio:
3584 if (fio->bio && *(fio->bio)) {
3585 struct bio *bio = *(fio->bio);
3586
3587 bio->bi_status = BLK_STS_IOERR;
3588 bio_endio(bio);
3589 *(fio->bio) = NULL;
3590 }
3591 return err;
3592}
3593
3594static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3595 unsigned int segno)
3596{
3597 int i;
3598
3599 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3600 if (CURSEG_I(sbi, i)->segno == segno)
3601 break;
3602 }
3603 return i;
3604}
3605
3606void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3607 block_t old_blkaddr, block_t new_blkaddr,
3608 bool recover_curseg, bool recover_newaddr,
3609 bool from_gc)
3610{
3611 struct sit_info *sit_i = SIT_I(sbi);
3612 struct curseg_info *curseg;
3613 unsigned int segno, old_cursegno;
3614 struct seg_entry *se;
3615 int type;
3616 unsigned short old_blkoff;
3617 unsigned char old_alloc_type;
3618
3619 segno = GET_SEGNO(sbi, new_blkaddr);
3620 se = get_seg_entry(sbi, segno);
3621 type = se->type;
3622
3623 down_write(&SM_I(sbi)->curseg_lock);
3624
3625 if (!recover_curseg) {
3626
3627 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3628 if (old_blkaddr == NULL_ADDR)
3629 type = CURSEG_COLD_DATA;
3630 else
3631 type = CURSEG_WARM_DATA;
3632 }
3633 } else {
3634 if (IS_CURSEG(sbi, segno)) {
3635
3636 type = __f2fs_get_curseg(sbi, segno);
3637 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3638 } else {
3639 type = CURSEG_WARM_DATA;
3640 }
3641 }
3642
3643 f2fs_bug_on(sbi, !IS_DATASEG(type));
3644 curseg = CURSEG_I(sbi, type);
3645
3646 mutex_lock(&curseg->curseg_mutex);
3647 down_write(&sit_i->sentry_lock);
3648
3649 old_cursegno = curseg->segno;
3650 old_blkoff = curseg->next_blkoff;
3651 old_alloc_type = curseg->alloc_type;
3652
3653
3654 if (segno != curseg->segno) {
3655 curseg->next_segno = segno;
3656 change_curseg(sbi, type, true);
3657 }
3658
3659 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
3660 __add_sum_entry(sbi, type, sum);
3661
3662 if (!recover_curseg || recover_newaddr) {
3663 if (!from_gc)
3664 update_segment_mtime(sbi, new_blkaddr, 0);
3665 update_sit_entry(sbi, new_blkaddr, 1);
3666 }
3667 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3668 invalidate_mapping_pages(META_MAPPING(sbi),
3669 old_blkaddr, old_blkaddr);
3670 f2fs_invalidate_compress_page(sbi, old_blkaddr);
3671 if (!from_gc)
3672 update_segment_mtime(sbi, old_blkaddr, 0);
3673 update_sit_entry(sbi, old_blkaddr, -1);
3674 }
3675
3676 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3677 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3678
3679 locate_dirty_segment(sbi, old_cursegno);
3680
3681 if (recover_curseg) {
3682 if (old_cursegno != curseg->segno) {
3683 curseg->next_segno = old_cursegno;
3684 change_curseg(sbi, type, true);
3685 }
3686 curseg->next_blkoff = old_blkoff;
3687 curseg->alloc_type = old_alloc_type;
3688 }
3689
3690 up_write(&sit_i->sentry_lock);
3691 mutex_unlock(&curseg->curseg_mutex);
3692 up_write(&SM_I(sbi)->curseg_lock);
3693}
3694
3695void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3696 block_t old_addr, block_t new_addr,
3697 unsigned char version, bool recover_curseg,
3698 bool recover_newaddr)
3699{
3700 struct f2fs_summary sum;
3701
3702 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3703
3704 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
3705 recover_curseg, recover_newaddr, false);
3706
3707 f2fs_update_data_blkaddr(dn, new_addr);
3708}
3709
3710void f2fs_wait_on_page_writeback(struct page *page,
3711 enum page_type type, bool ordered, bool locked)
3712{
3713 if (PageWriteback(page)) {
3714 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3715
3716
3717 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
3718
3719 f2fs_submit_merged_ipu_write(sbi, NULL, page);
3720 if (ordered) {
3721 wait_on_page_writeback(page);
3722 f2fs_bug_on(sbi, locked && PageWriteback(page));
3723 } else {
3724 wait_for_stable_page(page);
3725 }
3726 }
3727}
3728
3729void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
3730{
3731 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3732 struct page *cpage;
3733
3734 if (!f2fs_post_read_required(inode))
3735 return;
3736
3737 if (!__is_valid_data_blkaddr(blkaddr))
3738 return;
3739
3740 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3741 if (cpage) {
3742 f2fs_wait_on_page_writeback(cpage, DATA, true, true);
3743 f2fs_put_page(cpage, 1);
3744 }
3745}
3746
3747void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3748 block_t len)
3749{
3750 block_t i;
3751
3752 for (i = 0; i < len; i++)
3753 f2fs_wait_on_block_writeback(inode, blkaddr + i);
3754}
3755
3756static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3757{
3758 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3759 struct curseg_info *seg_i;
3760 unsigned char *kaddr;
3761 struct page *page;
3762 block_t start;
3763 int i, j, offset;
3764
3765 start = start_sum_block(sbi);
3766
3767 page = f2fs_get_meta_page(sbi, start++);
3768 if (IS_ERR(page))
3769 return PTR_ERR(page);
3770 kaddr = (unsigned char *)page_address(page);
3771
3772
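	/* Step 1: restore the NAT journal kept in the hot data curseg. */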
3773 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3774 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
3775
3776
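	/* Step 2: restore the SIT journal kept in the cold data curseg. */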
3777 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3778 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
3779 offset = 2 * SUM_JOURNAL_SIZE;
3780
3781
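	/* Step 3: restore the summary entries of the data cursegs. */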
3782 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3783 unsigned short blk_off;
3784 unsigned int segno;
3785
3786 seg_i = CURSEG_I(sbi, i);
3787 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3788 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3789 seg_i->next_segno = segno;
3790 reset_curseg(sbi, i, 0);
3791 seg_i->alloc_type = ckpt->alloc_type[i];
3792 seg_i->next_blkoff = blk_off;
3793
3794 if (seg_i->alloc_type == SSR)
3795 blk_off = sbi->blocks_per_seg;
3796
3797 for (j = 0; j < blk_off; j++) {
3798 struct f2fs_summary *s;
3799
3800 s = (struct f2fs_summary *)(kaddr + offset);
3801 seg_i->sum_blk->entries[j] = *s;
3802 offset += SUMMARY_SIZE;
3803 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
3804 SUM_FOOTER_SIZE)
3805 continue;
3806
3807 f2fs_put_page(page, 1);
3808 page = NULL;
3809
3810 page = f2fs_get_meta_page(sbi, start++);
3811 if (IS_ERR(page))
3812 return PTR_ERR(page);
3813 kaddr = (unsigned char *)page_address(page);
3814 offset = 0;
3815 }
3816 }
3817 f2fs_put_page(page, 1);
3818 return 0;
3819}
3820
3821static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3822{
3823 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3824 struct f2fs_summary_block *sum;
3825 struct curseg_info *curseg;
3826 struct page *new;
3827 unsigned short blk_off;
3828 unsigned int segno = 0;
3829 block_t blk_addr = 0;
3830 int err = 0;
3831
3832
3833 if (IS_DATASEG(type)) {
3834 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3835 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3836 CURSEG_HOT_DATA]);
3837 if (__exist_node_summaries(sbi))
3838 blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
3839 else
3840 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3841 } else {
3842 segno = le32_to_cpu(ckpt->cur_node_segno[type -
3843 CURSEG_HOT_NODE]);
3844 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3845 CURSEG_HOT_NODE]);
3846 if (__exist_node_summaries(sbi))
3847 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3848 type - CURSEG_HOT_NODE);
3849 else
3850 blk_addr = GET_SUM_BLOCK(sbi, segno);
3851 }
3852
3853 new = f2fs_get_meta_page(sbi, blk_addr);
3854 if (IS_ERR(new))
3855 return PTR_ERR(new);
3856 sum = (struct f2fs_summary_block *)page_address(new);
3857
3858 if (IS_NODESEG(type)) {
3859 if (__exist_node_summaries(sbi)) {
3860 struct f2fs_summary *ns = &sum->entries[0];
3861 int i;
3862
3863 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3864 ns->version = 0;
3865 ns->ofs_in_node = 0;
3866 }
3867 } else {
3868 err = f2fs_restore_node_summary(sbi, segno, sum);
3869 if (err)
3870 goto out;
3871 }
3872 }
3873
3874
3875 curseg = CURSEG_I(sbi, type);
3876 mutex_lock(&curseg->curseg_mutex);
3877
3878
3879 down_write(&curseg->journal_rwsem);
3880 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3881 up_write(&curseg->journal_rwsem);
3882
3883 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3884 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
3885 curseg->next_segno = segno;
3886 reset_curseg(sbi, type, 0);
3887 curseg->alloc_type = ckpt->alloc_type[type];
3888 curseg->next_blkoff = blk_off;
3889 mutex_unlock(&curseg->curseg_mutex);
3890out:
3891 f2fs_put_page(new, 1);
3892 return err;
3893}
3894
3895static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3896{
3897 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3898 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
3899 int type = CURSEG_HOT_DATA;
3900 int err;
3901
3902 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
3903 int npages = f2fs_npages_for_summary_flush(sbi, true);
3904
3905 if (npages >= 2)
3906 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
3907 META_CP, true);
3908
3909
3910 err = read_compacted_summaries(sbi);
3911 if (err)
3912 return err;
3913 type = CURSEG_HOT_NODE;
3914 }
3915
3916 if (__exist_node_summaries(sbi))
3917 f2fs_ra_meta_pages(sbi,
3918 sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
3919 NR_CURSEG_PERSIST_TYPE - type, META_CP, true);
3920
3921 for (; type <= CURSEG_COLD_NODE; type++) {
3922 err = read_normal_summaries(sbi, type);
3923 if (err)
3924 return err;
3925 }
3926
3927
3928 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3929 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3930 f2fs_err(sbi, "invalid journal entries nats %u sits %u",
3931 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3932 return -EINVAL;
3933 }
3934
3935 return 0;
3936}
3937
3938static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3939{
3940 struct page *page;
3941 unsigned char *kaddr;
3942 struct f2fs_summary *summary;
3943 struct curseg_info *seg_i;
3944 int written_size = 0;
3945 int i, j;
3946
3947 page = f2fs_grab_meta_page(sbi, blkaddr++);
3948 kaddr = (unsigned char *)page_address(page);
3949 memset(kaddr, 0, PAGE_SIZE);
3950
3951
3952 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
3953 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
3954 written_size += SUM_JOURNAL_SIZE;
3955
3956
3957 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
3958 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
3959 written_size += SUM_JOURNAL_SIZE;
3960
3961
3962 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3963 unsigned short blkoff;
3964
3965 seg_i = CURSEG_I(sbi, i);
3966 if (sbi->ckpt->alloc_type[i] == SSR)
3967 blkoff = sbi->blocks_per_seg;
3968 else
3969 blkoff = curseg_blkoff(sbi, i);
3970
3971 for (j = 0; j < blkoff; j++) {
3972 if (!page) {
3973 page = f2fs_grab_meta_page(sbi, blkaddr++);
3974 kaddr = (unsigned char *)page_address(page);
3975 memset(kaddr, 0, PAGE_SIZE);
3976 written_size = 0;
3977 }
3978 summary = (struct f2fs_summary *)(kaddr + written_size);
3979 *summary = seg_i->sum_blk->entries[j];
3980 written_size += SUMMARY_SIZE;
3981
3982 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
3983 SUM_FOOTER_SIZE)
3984 continue;
3985
3986 set_page_dirty(page);
3987 f2fs_put_page(page, 1);
3988 page = NULL;
3989 }
3990 }
3991 if (page) {
3992 set_page_dirty(page);
3993 f2fs_put_page(page, 1);
3994 }
3995}
3996
3997static void write_normal_summaries(struct f2fs_sb_info *sbi,
3998 block_t blkaddr, int type)
3999{
4000 int i, end;
4001
4002 if (IS_DATASEG(type))
4003 end = type + NR_CURSEG_DATA_TYPE;
4004 else
4005 end = type + NR_CURSEG_NODE_TYPE;
4006
4007 for (i = type; i < end; i++)
4008 write_current_sum_page(sbi, i, blkaddr + (i - type));
4009}
4010
4011void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4012{
4013 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
4014 write_compacted_summaries(sbi, start_blk);
4015 else
4016 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
4017}
4018
4019void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
4020{
4021 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
4022}
4023
4024int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
4025 unsigned int val, int alloc)
4026{
4027 int i;
4028
4029 if (type == NAT_JOURNAL) {
4030 for (i = 0; i < nats_in_cursum(journal); i++) {
4031 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
4032 return i;
4033 }
4034 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
4035 return update_nats_in_cursum(journal, 1);
4036 } else if (type == SIT_JOURNAL) {
4037 for (i = 0; i < sits_in_cursum(journal); i++)
4038 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
4039 return i;
4040 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
4041 return update_sits_in_cursum(journal, 1);
4042 }
4043 return -1;
4044}
4045
4046static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
4047 unsigned int segno)
4048{
4049 return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
4050}
4051
4052static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
4053 unsigned int start)
4054{
4055 struct sit_info *sit_i = SIT_I(sbi);
4056 struct page *page;
4057 pgoff_t src_off, dst_off;
4058
4059 src_off = current_sit_addr(sbi, start);
4060 dst_off = next_sit_addr(sbi, src_off);
4061
4062 page = f2fs_grab_meta_page(sbi, dst_off);
4063 seg_info_to_sit_page(sbi, page, start);
4064
4065 set_page_dirty(page);
4066 set_to_next_sit(sit_i, start);
4067
4068 return page;
4069}
4070
4071static struct sit_entry_set *grab_sit_entry_set(void)
4072{
4073 struct sit_entry_set *ses =
4074 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
4075
4076 ses->entry_cnt = 0;
4077 INIT_LIST_HEAD(&ses->set_list);
4078 return ses;
4079}
4080
4081static void release_sit_entry_set(struct sit_entry_set *ses)
4082{
4083 list_del(&ses->set_list);
4084 kmem_cache_free(sit_entry_set_slab, ses);
4085}
4086
4087static void adjust_sit_entry_set(struct sit_entry_set *ses,
4088 struct list_head *head)
4089{
4090 struct sit_entry_set *next = ses;
4091
4092 if (list_is_last(&ses->set_list, head))
4093 return;
4094
4095 list_for_each_entry_continue(next, head, set_list)
4096 if (ses->entry_cnt <= next->entry_cnt)
4097 break;
4098
4099 list_move_tail(&ses->set_list, &next->set_list);
4100}
4101
4102static void add_sit_entry(unsigned int segno, struct list_head *head)
4103{
4104 struct sit_entry_set *ses;
4105 unsigned int start_segno = START_SEGNO(segno);
4106
4107 list_for_each_entry(ses, head, set_list) {
4108 if (ses->start_segno == start_segno) {
4109 ses->entry_cnt++;
4110 adjust_sit_entry_set(ses, head);
4111 return;
4112 }
4113 }
4114
4115 ses = grab_sit_entry_set();
4116
4117 ses->start_segno = start_segno;
4118 ses->entry_cnt++;
4119 list_add(&ses->set_list, head);
4120}
4121
4122static void add_sits_in_set(struct f2fs_sb_info *sbi)
4123{
4124 struct f2fs_sm_info *sm_info = SM_I(sbi);
4125 struct list_head *set_list = &sm_info->sit_entry_set;
4126 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
4127 unsigned int segno;
4128
4129 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
4130 add_sit_entry(segno, set_list);
4131}
4132
4133static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
4134{
4135 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4136 struct f2fs_journal *journal = curseg->journal;
4137 int i;
4138
4139 down_write(&curseg->journal_rwsem);
4140 for (i = 0; i < sits_in_cursum(journal); i++) {
4141 unsigned int segno;
4142 bool dirtied;
4143
4144 segno = le32_to_cpu(segno_in_journal(journal, i));
4145 dirtied = __mark_sit_entry_dirty(sbi, segno);
4146
4147 if (!dirtied)
4148 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
4149 }
4150 update_sits_in_cursum(journal, -i);
4151 up_write(&curseg->journal_rwsem);
4152}
4153
/*
 * Called from checkpoint: flush dirty SIT entries (including the SIT journal)
 * and move prefree segments to the free list.
 */
4158void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4159{
4160 struct sit_info *sit_i = SIT_I(sbi);
4161 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
4162 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4163 struct f2fs_journal *journal = curseg->journal;
4164 struct sit_entry_set *ses, *tmp;
4165 struct list_head *head = &SM_I(sbi)->sit_entry_set;
4166 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4167 struct seg_entry *se;
4168
4169 down_write(&sit_i->sentry_lock);
4170
4171 if (!sit_i->dirty_sentries)
4172 goto out;
4173
	/*
	 * Group dirty SIT entries into per-block sets so they can be flushed
	 * together.
	 */
4178 add_sits_in_set(sbi);
4179
	/*
	 * If the journal cannot hold all dirty entries, or we are resizing,
	 * move the journaled entries into the sets as well.
	 */
4185 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
4186 !to_journal)
4187 remove_sits_in_journal(sbi);
4188
	/*
	 * Flush each set either into the SIT journal of the cold data summary
	 * block or into a SIT block on disk.
	 */
4194 list_for_each_entry_safe(ses, tmp, head, set_list) {
4195 struct page *page = NULL;
4196 struct f2fs_sit_block *raw_sit = NULL;
4197 unsigned int start_segno = ses->start_segno;
4198 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
4199 (unsigned long)MAIN_SEGS(sbi));
4200 unsigned int segno = start_segno;
4201
4202 if (to_journal &&
4203 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
4204 to_journal = false;
4205
4206 if (to_journal) {
4207 down_write(&curseg->journal_rwsem);
4208 } else {
4209 page = get_next_sit_page(sbi, start_segno);
4210 raw_sit = page_address(page);
4211 }
4212
4213
4214 for_each_set_bit_from(segno, bitmap, end) {
4215 int offset, sit_offset;
4216
4217 se = get_seg_entry(sbi, segno);
4218#ifdef CONFIG_F2FS_CHECK_FS
4219 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
4220 SIT_VBLOCK_MAP_SIZE))
4221 f2fs_bug_on(sbi, 1);
4222#endif
4223
4224
4225 if (!(cpc->reason & CP_DISCARD)) {
4226 cpc->trim_start = segno;
4227 add_discard_addrs(sbi, cpc, false);
4228 }
4229
4230 if (to_journal) {
4231 offset = f2fs_lookup_journal_in_cursum(journal,
4232 SIT_JOURNAL, segno, 1);
4233 f2fs_bug_on(sbi, offset < 0);
4234 segno_in_journal(journal, offset) =
4235 cpu_to_le32(segno);
4236 seg_info_to_raw_sit(se,
4237 &sit_in_journal(journal, offset));
4238 check_block_count(sbi, segno,
4239 &sit_in_journal(journal, offset));
4240 } else {
4241 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
4242 seg_info_to_raw_sit(se,
4243 &raw_sit->entries[sit_offset]);
4244 check_block_count(sbi, segno,
4245 &raw_sit->entries[sit_offset]);
4246 }
4247
4248 __clear_bit(segno, bitmap);
4249 sit_i->dirty_sentries--;
4250 ses->entry_cnt--;
4251 }
4252
4253 if (to_journal)
4254 up_write(&curseg->journal_rwsem);
4255 else
4256 f2fs_put_page(page, 1);
4257
4258 f2fs_bug_on(sbi, ses->entry_cnt);
4259 release_sit_entry_set(ses);
4260 }
4261
4262 f2fs_bug_on(sbi, !list_empty(head));
4263 f2fs_bug_on(sbi, sit_i->dirty_sentries);
4264out:
4265 if (cpc->reason & CP_DISCARD) {
4266 __u64 trim_start = cpc->trim_start;
4267
4268 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
4269 add_discard_addrs(sbi, cpc, false);
4270
4271 cpc->trim_start = trim_start;
4272 }
4273 up_write(&sit_i->sentry_lock);
4274
4275 set_prefree_as_free_segments(sbi);
4276}
4277
4278static int build_sit_info(struct f2fs_sb_info *sbi)
4279{
4280 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4281 struct sit_info *sit_i;
4282 unsigned int sit_segs, start;
4283 char *src_bitmap, *bitmap;
4284 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
4285
4286
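	/* allocate memory for SIT information */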
4287 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
4288 if (!sit_i)
4289 return -ENOMEM;
4290
4291 SM_I(sbi)->sit_info = sit_i;
4292
4293 sit_i->sentries =
4294 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4295 MAIN_SEGS(sbi)),
4296 GFP_KERNEL);
4297 if (!sit_i->sentries)
4298 return -ENOMEM;
4299
4300 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4301 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
4302 GFP_KERNEL);
4303 if (!sit_i->dirty_sentries_bitmap)
4304 return -ENOMEM;
4305
4306#ifdef CONFIG_F2FS_CHECK_FS
4307 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4308#else
4309 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4310#endif
4311 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4312 if (!sit_i->bitmap)
4313 return -ENOMEM;
4314
4315 bitmap = sit_i->bitmap;
4316
4317 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4318 sit_i->sentries[start].cur_valid_map = bitmap;
4319 bitmap += SIT_VBLOCK_MAP_SIZE;
4320
4321 sit_i->sentries[start].ckpt_valid_map = bitmap;
4322 bitmap += SIT_VBLOCK_MAP_SIZE;
4323
4324#ifdef CONFIG_F2FS_CHECK_FS
4325 sit_i->sentries[start].cur_valid_map_mir = bitmap;
4326 bitmap += SIT_VBLOCK_MAP_SIZE;
4327#endif
4328
4329 sit_i->sentries[start].discard_map = bitmap;
4330 bitmap += SIT_VBLOCK_MAP_SIZE;
4331 }
4332
4333 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
4334 if (!sit_i->tmp_map)
4335 return -ENOMEM;
4336
4337 if (__is_large_section(sbi)) {
4338 sit_i->sec_entries =
4339 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4340 MAIN_SECS(sbi)),
4341 GFP_KERNEL);
4342 if (!sit_i->sec_entries)
4343 return -ENOMEM;
4344 }
4345
4346
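	/* get information related with SIT */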
4347 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4348
4349
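	/* setup SIT bitmap from checkpoint pack */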
4350 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
4351 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4352
4353 sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
4354 if (!sit_i->sit_bitmap)
4355 return -ENOMEM;
4356
4357#ifdef CONFIG_F2FS_CHECK_FS
4358 sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4359 sit_bitmap_size, GFP_KERNEL);
4360 if (!sit_i->sit_bitmap_mir)
4361 return -ENOMEM;
4362
4363 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4364 main_bitmap_size, GFP_KERNEL);
4365 if (!sit_i->invalid_segmap)
4366 return -ENOMEM;
4367#endif
4368
4369
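	/* init SIT information */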
4370 sit_i->s_ops = &default_salloc_ops;
4371
4372 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4373 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
4374 sit_i->written_valid_blocks = 0;
4375 sit_i->bitmap_size = sit_bitmap_size;
4376 sit_i->dirty_sentries = 0;
4377 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4378 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
4379 sit_i->mounted_time = ktime_get_boottime_seconds();
4380 init_rwsem(&sit_i->sentry_lock);
4381 return 0;
4382}
4383
4384static int build_free_segmap(struct f2fs_sb_info *sbi)
4385{
4386 struct free_segmap_info *free_i;
4387 unsigned int bitmap_size, sec_bitmap_size;
4388
4389
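	/* allocate memory for free segmap information */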
4390 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
4391 if (!free_i)
4392 return -ENOMEM;
4393
4394 SM_I(sbi)->free_info = free_i;
4395
4396 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4397 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
4398 if (!free_i->free_segmap)
4399 return -ENOMEM;
4400
4401 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4402 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
4403 if (!free_i->free_secmap)
4404 return -ENOMEM;
4405
4406
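	/* set all segments as dirty temporarily */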
4407 memset(free_i->free_segmap, 0xff, bitmap_size);
4408 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4409
4410
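	/* init free segmap information */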
4411 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
4412 free_i->free_segments = 0;
4413 free_i->free_sections = 0;
4414 spin_lock_init(&free_i->segmap_lock);
4415 return 0;
4416}
4417
4418static int build_curseg(struct f2fs_sb_info *sbi)
4419{
4420 struct curseg_info *array;
4421 int i;
4422
4423 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
4424 sizeof(*array)), GFP_KERNEL);
4425 if (!array)
4426 return -ENOMEM;
4427
4428 SM_I(sbi)->curseg_array = array;
4429
4430 for (i = 0; i < NO_CHECK_TYPE; i++) {
4431 mutex_init(&array[i].curseg_mutex);
4432 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
4433 if (!array[i].sum_blk)
4434 return -ENOMEM;
4435 init_rwsem(&array[i].journal_rwsem);
4436 array[i].journal = f2fs_kzalloc(sbi,
4437 sizeof(struct f2fs_journal), GFP_KERNEL);
4438 if (!array[i].journal)
4439 return -ENOMEM;
4440 if (i < NR_PERSISTENT_LOG)
4441 array[i].seg_type = CURSEG_HOT_DATA + i;
4442 else if (i == CURSEG_COLD_DATA_PINNED)
4443 array[i].seg_type = CURSEG_COLD_DATA;
4444 else if (i == CURSEG_ALL_DATA_ATGC)
4445 array[i].seg_type = CURSEG_COLD_DATA;
4446 array[i].segno = NULL_SEGNO;
4447 array[i].next_blkoff = 0;
4448 array[i].inited = false;
4449 }
4450 return restore_curseg_summaries(sbi);
4451}
4452
4453static int build_sit_entries(struct f2fs_sb_info *sbi)
4454{
4455 struct sit_info *sit_i = SIT_I(sbi);
4456 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
4457 struct f2fs_journal *journal = curseg->journal;
4458 struct seg_entry *se;
4459 struct f2fs_sit_entry sit;
4460 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4461 unsigned int i, start, end;
4462 unsigned int readed, start_blk = 0;
4463 int err = 0;
4464 block_t total_node_blocks = 0;
4465
4466 do {
4467 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
4468 META_SIT, true);
4469
4470 start = start_blk * sit_i->sents_per_block;
4471 end = (start_blk + readed) * sit_i->sents_per_block;
4472
4473 for (; start < end && start < MAIN_SEGS(sbi); start++) {
4474 struct f2fs_sit_block *sit_blk;
4475 struct page *page;
4476
4477 se = &sit_i->sentries[start];
4478 page = get_current_sit_page(sbi, start);
4479 if (IS_ERR(page))
4480 return PTR_ERR(page);
4481 sit_blk = (struct f2fs_sit_block *)page_address(page);
4482 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4483 f2fs_put_page(page, 1);
4484
4485 err = check_block_count(sbi, start, &sit);
4486 if (err)
4487 return err;
4488 seg_info_from_raw_sit(se, &sit);
4489 if (IS_NODESEG(se->type))
4490 total_node_blocks += se->valid_blocks;
4491
4492
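			/* build discard map only one time */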
4493 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4494 memset(se->discard_map, 0xff,
4495 SIT_VBLOCK_MAP_SIZE);
4496 } else {
4497 memcpy(se->discard_map,
4498 se->cur_valid_map,
4499 SIT_VBLOCK_MAP_SIZE);
4500 sbi->discard_blks +=
4501 sbi->blocks_per_seg -
4502 se->valid_blocks;
4503 }
4504
4505 if (__is_large_section(sbi))
4506 get_sec_entry(sbi, start)->valid_blocks +=
4507 se->valid_blocks;
4508 }
4509 start_blk += readed;
4510 } while (start_blk < sit_blk_cnt);
4511
4512 down_read(&curseg->journal_rwsem);
4513 for (i = 0; i < sits_in_cursum(journal); i++) {
4514 unsigned int old_valid_blocks;
4515
4516 start = le32_to_cpu(segno_in_journal(journal, i));
4517 if (start >= MAIN_SEGS(sbi)) {
4518 f2fs_err(sbi, "Wrong journal entry on segno %u",
4519 start);
4520 err = -EFSCORRUPTED;
4521 break;
4522 }
4523
4524 se = &sit_i->sentries[start];
4525 sit = sit_in_journal(journal, i);
4526
4527 old_valid_blocks = se->valid_blocks;
4528 if (IS_NODESEG(se->type))
4529 total_node_blocks -= old_valid_blocks;
4530
4531 err = check_block_count(sbi, start, &sit);
4532 if (err)
4533 break;
4534 seg_info_from_raw_sit(se, &sit);
4535 if (IS_NODESEG(se->type))
4536 total_node_blocks += se->valid_blocks;
4537
4538 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4539 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4540 } else {
4541 memcpy(se->discard_map, se->cur_valid_map,
4542 SIT_VBLOCK_MAP_SIZE);
4543 sbi->discard_blks += old_valid_blocks;
4544 sbi->discard_blks -= se->valid_blocks;
4545 }
4546
4547 if (__is_large_section(sbi)) {
4548 get_sec_entry(sbi, start)->valid_blocks +=
4549 se->valid_blocks;
4550 get_sec_entry(sbi, start)->valid_blocks -=
4551 old_valid_blocks;
4552 }
4553 }
4554 up_read(&curseg->journal_rwsem);
4555
4556 if (!err && total_node_blocks != valid_node_count(sbi)) {
4557 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4558 total_node_blocks, valid_node_count(sbi));
4559 err = -EFSCORRUPTED;
4560 }
4561
4562 return err;
4563}
4564
4565static void init_free_segmap(struct f2fs_sb_info *sbi)
4566{
4567 unsigned int start;
4568 int type;
4569 struct seg_entry *sentry;
4570
4571 for (start = 0; start < MAIN_SEGS(sbi); start++) {
4572 if (f2fs_usable_blks_in_seg(sbi, start) == 0)
4573 continue;
4574 sentry = get_seg_entry(sbi, start);
4575 if (!sentry->valid_blocks)
4576 __set_free(sbi, start);
4577 else
4578 SIT_I(sbi)->written_valid_blocks +=
4579 sentry->valid_blocks;
4580 }
4581
4582
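	/* set use the current segments */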
4583 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4584 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4585
4586 __set_test_and_inuse(sbi, curseg_t->segno);
4587 }
4588}
4589
4590static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4591{
4592 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4593 struct free_segmap_info *free_i = FREE_I(sbi);
4594 unsigned int segno = 0, offset = 0, secno;
4595 block_t valid_blocks, usable_blks_in_seg;
4596 block_t blks_per_sec = BLKS_PER_SEC(sbi);
4597
4598 while (1) {
4599
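		/* find dirty segment based on free segmap */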
4600 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4601 if (segno >= MAIN_SEGS(sbi))
4602 break;
4603 offset = segno + 1;
4604 valid_blocks = get_valid_blocks(sbi, segno, false);
4605 usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
4606 if (valid_blocks == usable_blks_in_seg || !valid_blocks)
4607 continue;
4608 if (valid_blocks > usable_blks_in_seg) {
4609 f2fs_bug_on(sbi, 1);
4610 continue;
4611 }
4612 mutex_lock(&dirty_i->seglist_lock);
4613 __locate_dirty_segment(sbi, segno, DIRTY);
4614 mutex_unlock(&dirty_i->seglist_lock);
4615 }
4616
4617 if (!__is_large_section(sbi))
4618 return;
4619
4620 mutex_lock(&dirty_i->seglist_lock);
4621 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
4622 valid_blocks = get_valid_blocks(sbi, segno, true);
4623 secno = GET_SEC_FROM_SEG(sbi, segno);
4624
4625 if (!valid_blocks || valid_blocks == blks_per_sec)
4626 continue;
4627 if (IS_CURSEC(sbi, secno))
4628 continue;
4629 set_bit(secno, dirty_i->dirty_secmap);
4630 }
4631 mutex_unlock(&dirty_i->seglist_lock);
4632}
4633
4634static int init_victim_secmap(struct f2fs_sb_info *sbi)
4635{
4636 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4637 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4638
4639 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4640 if (!dirty_i->victim_secmap)
4641 return -ENOMEM;
4642 return 0;
4643}
4644
4645static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4646{
4647 struct dirty_seglist_info *dirty_i;
4648 unsigned int bitmap_size, i;
4649
4650
4651 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4652 GFP_KERNEL);
4653 if (!dirty_i)
4654 return -ENOMEM;
4655
4656 SM_I(sbi)->dirty_info = dirty_i;
4657 mutex_init(&dirty_i->seglist_lock);
4658
4659 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4660
4661 for (i = 0; i < NR_DIRTY_TYPE; i++) {
4662 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4663 GFP_KERNEL);
4664 if (!dirty_i->dirty_segmap[i])
4665 return -ENOMEM;
4666 }
4667
4668 if (__is_large_section(sbi)) {
4669 bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
4670 dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
4671 bitmap_size, GFP_KERNEL);
4672 if (!dirty_i->dirty_secmap)
4673 return -ENOMEM;
4674 }
4675
4676 init_dirty_segmap(sbi);
4677 return init_victim_secmap(sbi);
4678}
4679
4680static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4681{
4682 int i;
4687
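	/*
	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
	 */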
4688 for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4689 struct curseg_info *curseg = CURSEG_I(sbi, i);
4690 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4691 unsigned int blkofs = curseg->next_blkoff;
4692
4693 if (f2fs_sb_has_readonly(sbi) &&
4694 i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
4695 continue;
4696
4697 sanity_check_seg_type(sbi, curseg->seg_type);
4698
4699 if (f2fs_test_bit(blkofs, se->cur_valid_map))
4700 goto out;
4701
4702 if (curseg->alloc_type == SSR)
4703 continue;
4704
4705 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4706 if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4707 continue;
4708out:
4709 f2fs_err(sbi,
4710 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4711 i, curseg->segno, curseg->alloc_type,
4712 curseg->next_blkoff, blkofs);
4713 return -EFSCORRUPTED;
4714 }
4715 }
4716 return 0;
4717}
4718
4719#ifdef CONFIG_BLK_DEV_ZONED
4720
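/*
 * Check the write pointer of one sequential zone against the valid block
 * bitmap, and reset the write pointer of a zone that holds no valid blocks.
 */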
4721static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4722 struct f2fs_dev_info *fdev,
4723 struct blk_zone *zone)
4724{
4725 unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4726 block_t zone_block, wp_block, last_valid_block;
4727 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4728 int i, s, b, ret;
4729 struct seg_entry *se;
4730
4731 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4732 return 0;
4733
4734 wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4735 wp_segno = GET_SEGNO(sbi, wp_block);
4736 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4737 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4738 zone_segno = GET_SEGNO(sbi, zone_block);
4739 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4740
4741 if (zone_segno >= MAIN_SEGS(sbi))
4742 return 0;
4747
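	/*
	 * Skip check of zones cursegs point to, since
	 * fix_curseg_write_pointer() checks them.
	 */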
4748 for (i = 0; i < NO_CHECK_TYPE; i++)
4749 if (zone_secno == GET_SEC_FROM_SEG(sbi,
4750 CURSEG_I(sbi, i)->segno))
4751 return 0;
4752
4753
4754
4755
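	/* get last valid block of the zone */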
4756 last_valid_block = zone_block - 1;
4757 for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4758 segno = zone_segno + s;
4759 se = get_seg_entry(sbi, segno);
4760 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4761 if (f2fs_test_bit(b, se->cur_valid_map)) {
4762 last_valid_block = START_BLOCK(sbi, segno) + b;
4763 break;
4764 }
4765 if (last_valid_block >= zone_block)
4766 break;
4767 }
4774
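	/*
	 * If last valid block is beyond the write pointer, report the
	 * inconsistency. This inconsistency does not cause write error
	 * because the zone will not be selected for write operation until
	 * it gets discarded. Just report it.
	 */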
4775 if (last_valid_block >= wp_block) {
4776 f2fs_notice(sbi, "Valid block beyond write pointer: "
4777 "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4778 GET_SEGNO(sbi, last_valid_block),
4779 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4780 wp_segno, wp_blkoff);
4781 return 0;
4782 }
4787
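	/*
	 * If there is no valid block in the zone and if write pointer is
	 * not at zone start, reset the write pointer.
	 */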
4788 if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4789 f2fs_notice(sbi,
4790 "Zone without valid block has non-zero write "
4791 "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4792 wp_segno, wp_blkoff);
4793 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4794 zone->len >> log_sectors_per_block);
4795 if (ret) {
4796 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4797 fdev->path, ret);
4798 return ret;
4799 }
4800 }
4801
4802 return 0;
4803}
4804
4805static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4806 block_t zone_blkaddr)
4807{
4808 int i;
4809
4810 for (i = 0; i < sbi->s_ndevs; i++) {
4811 if (!bdev_is_zoned(FDEV(i).bdev))
4812 continue;
4813 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4814 zone_blkaddr <= FDEV(i).end_blk))
4815 return &FDEV(i);
4816 }
4817
4818 return NULL;
4819}
4820
4821static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4822 void *data)
4823{
4824 memcpy(data, zone, sizeof(struct blk_zone));
4825 return 0;
4826}
4827
4828static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4829{
4830 struct curseg_info *cs = CURSEG_I(sbi, type);
4831 struct f2fs_dev_info *zbd;
4832 struct blk_zone zone;
4833 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4834 block_t cs_zone_block, wp_block;
4835 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4836 sector_t zone_sector;
4837 int err;
4838
4839 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4840 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4841
4842 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4843 if (!zbd)
4844 return 0;
4845
4846
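	/* report zone for the sector the curseg points to */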
4847 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4848 << log_sectors_per_block;
4849 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4850 report_one_zone_cb, &zone);
4851 if (err != 1) {
4852 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4853 zbd->path, err);
4854 return err;
4855 }
4856
4857 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4858 return 0;
4859
4860 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4861 wp_segno = GET_SEGNO(sbi, wp_block);
4862 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4863 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4864
4865 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4866 wp_sector_off == 0)
4867 return 0;
4868
4869 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4870 "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4871 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4872
4873 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4874 "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4875
4876 f2fs_allocate_new_section(sbi, type, true);
4877
4878
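	/* check consistency of the zone curseg pointed to */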
4879 if (check_zone_write_pointer(sbi, zbd, &zone))
4880 return -EIO;
4881
4882
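	/* check newly assigned zone */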
4883 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4884 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4885
4886 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4887 if (!zbd)
4888 return 0;
4889
4890 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4891 << log_sectors_per_block;
4892 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4893 report_one_zone_cb, &zone);
4894 if (err != 1) {
4895 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4896 zbd->path, err);
4897 return err;
4898 }
4899
4900 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4901 return 0;
4902
4903 if (zone.wp != zone.start) {
4904 f2fs_notice(sbi,
4905 "New zone for curseg[%d] is not yet discarded. "
4906 "Reset the zone: curseg[0x%x,0x%x]",
4907 type, cs->segno, cs->next_blkoff);
4908 err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4909 zone_sector >> log_sectors_per_block,
4910 zone.len >> log_sectors_per_block);
4911 if (err) {
4912 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4913 zbd->path, err);
4914 return err;
4915 }
4916 }
4917
4918 return 0;
4919}
4920
4921int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4922{
4923 int i, ret;
4924
4925 for (i = 0; i < NR_PERSISTENT_LOG; i++) {
4926 ret = fix_curseg_write_pointer(sbi, i);
4927 if (ret)
4928 return ret;
4929 }
4930
4931 return 0;
4932}
4933
4934struct check_zone_write_pointer_args {
4935 struct f2fs_sb_info *sbi;
4936 struct f2fs_dev_info *fdev;
4937};
4938
4939static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4940 void *data)
4941{
4942 struct check_zone_write_pointer_args *args;
4943
4944 args = (struct check_zone_write_pointer_args *)data;
4945
4946 return check_zone_write_pointer(args->sbi, args->fdev, zone);
4947}
4948
4949int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4950{
4951 int i, ret;
4952 struct check_zone_write_pointer_args args;
4953
4954 for (i = 0; i < sbi->s_ndevs; i++) {
4955 if (!bdev_is_zoned(FDEV(i).bdev))
4956 continue;
4957
4958 args.sbi = sbi;
4959 args.fdev = &FDEV(i);
4960 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4961 check_zone_write_pointer_cb, &args);
4962 if (ret < 0)
4963 return ret;
4964 }
4965
4966 return 0;
4967}
4968
4969static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
4970 unsigned int dev_idx)
4971{
4972 if (!bdev_is_zoned(FDEV(dev_idx).bdev))
4973 return true;
4974 return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
4975}
4976
4977
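/* Return the zone index in the given device */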
4978static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
4979 int dev_idx)
4980{
4981 block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
4982
4983 return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
4984 sbi->log_blocks_per_blkz;
4985}
4990
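/*
 * Return the usable segments in a section based on the zone's
 * corresponding zone capacity. Zone is equal to a section.
 */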
4991static inline unsigned int f2fs_usable_zone_segs_in_sec(
4992 struct f2fs_sb_info *sbi, unsigned int segno)
4993{
4994 unsigned int dev_idx, zone_idx, unusable_segs_in_sec;
4995
4996 dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
4997 zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
4998
4999
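	/* Conventional zone's capacity is always equal to zone size */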
5000 if (is_conv_zone(sbi, zone_idx, dev_idx))
5001 return sbi->segs_per_sec;
5006
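	/*
	 * If the zone_capacity_blocks array is NULL, then zone capacity
	 * is equal to the zone size for all zones.
	 */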
5007 if (!FDEV(dev_idx).zone_capacity_blocks)
5008 return sbi->segs_per_sec;
5009
5010
5011 unusable_segs_in_sec = (sbi->blocks_per_blkz -
5012 FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
5013 sbi->log_blocks_per_seg;
5014 return sbi->segs_per_sec - unusable_segs_in_sec;
5015}
5024
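/*
 * Return the number of usable blocks in a segment. The number of blocks
 * returned is always equal to the number of blocks in a segment for
 * segments fully contained within a sequential zone capacity or a
 * conventional zone. For segments partially contained in a sequential
 * zone capacity, the number of usable blocks up to the zone capacity
 * is returned. 0 is returned in all other cases.
 */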
5025static inline unsigned int f2fs_usable_zone_blks_in_seg(
5026 struct f2fs_sb_info *sbi, unsigned int segno)
5027{
5028 block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
5029 unsigned int zone_idx, dev_idx, secno;
5030
5031 secno = GET_SEC_FROM_SEG(sbi, segno);
5032 seg_start = START_BLOCK(sbi, segno);
5033 dev_idx = f2fs_target_device_index(sbi, seg_start);
5034 zone_idx = get_zone_idx(sbi, secno, dev_idx);
5039
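	/*
	 * Conventional zone's capacity is always equal to zone size,
	 * so, blocks per segment is unchanged.
	 */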
5040 if (is_conv_zone(sbi, zone_idx, dev_idx))
5041 return sbi->blocks_per_seg;
5042
5043 if (!FDEV(dev_idx).zone_capacity_blocks)
5044 return sbi->blocks_per_seg;
5045
5046 sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
5047 sec_cap_blkaddr = sec_start_blkaddr +
5048 FDEV(dev_idx).zone_capacity_blocks[zone_idx];
5055
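	/*
	 * If the segment starts before zone capacity and spans beyond
	 * zone capacity, then usable blocks are from seg start to
	 * zone capacity. If the segment starts after the zone capacity,
	 * then there are no usable blocks.
	 */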
5056 if (seg_start >= sec_cap_blkaddr)
5057 return 0;
5058 if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
5059 return sec_cap_blkaddr - seg_start;
5060
5061 return sbi->blocks_per_seg;
5062}
5063#else
5064int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
5065{
5066 return 0;
5067}
5068
5069int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
5070{
5071 return 0;
5072}
5073
5074static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
5075 unsigned int segno)
5076{
5077 return 0;
5078}
5079
5080static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
5081 unsigned int segno)
5082{
5083 return 0;
5084}
5085#endif

5086unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
5087 unsigned int segno)
5088{
5089 if (f2fs_sb_has_blkzoned(sbi))
5090 return f2fs_usable_zone_blks_in_seg(sbi, segno);
5091
5092 return sbi->blocks_per_seg;
5093}
5094
5095unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
5096 unsigned int segno)
5097{
5098 if (f2fs_sb_has_blkzoned(sbi))
5099 return f2fs_usable_zone_segs_in_sec(sbi, segno);
5100
5101 return sbi->segs_per_sec;
5102}
5103
5104
5105
5106
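/*
 * Update min, max modified time for cost-benefit GC algorithm
 */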
5107static void init_min_max_mtime(struct f2fs_sb_info *sbi)
5108{
5109 struct sit_info *sit_i = SIT_I(sbi);
5110 unsigned int segno;
5111
5112 down_write(&sit_i->sentry_lock);
5113
5114 sit_i->min_mtime = ULLONG_MAX;
5115
5116 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
5117 unsigned int i;
5118 unsigned long long mtime = 0;
5119
5120 for (i = 0; i < sbi->segs_per_sec; i++)
5121 mtime += get_seg_entry(sbi, segno + i)->mtime;
5122
5123 mtime = div_u64(mtime, sbi->segs_per_sec);
5124
5125 if (sit_i->min_mtime > mtime)
5126 sit_i->min_mtime = mtime;
5127 }
5128 sit_i->max_mtime = get_mtime(sbi, false);
5129 sit_i->dirty_max_mtime = 0;
5130 up_write(&sit_i->sentry_lock);
5131}
5132
5133int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
5134{
5135 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
5136 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
5137 struct f2fs_sm_info *sm_info;
5138 int err;
5139
5140 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
5141 if (!sm_info)
5142 return -ENOMEM;
5143
5144
5145 sbi->sm_info = sm_info;
5146 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5147 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
5148 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
5149 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
5150 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
5151 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
5152 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
5153 sm_info->rec_prefree_segments = sm_info->main_segments *
5154 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
5155 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
5156 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
5157
5158 if (!f2fs_lfs_mode(sbi))
5159 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
5160 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
5161 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
5162 sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
5163 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
5164 sm_info->min_ssr_sections = reserved_sections(sbi);
5165
5166 INIT_LIST_HEAD(&sm_info->sit_entry_set);
5167
5168 init_rwsem(&sm_info->curseg_lock);
5169
5170 if (!f2fs_readonly(sbi->sb)) {
5171 err = f2fs_create_flush_cmd_control(sbi);
5172 if (err)
5173 return err;
5174 }
5175
5176 err = create_discard_cmd_control(sbi);
5177 if (err)
5178 return err;
5179
5180 err = build_sit_info(sbi);
5181 if (err)
5182 return err;
5183 err = build_free_segmap(sbi);
5184 if (err)
5185 return err;
5186 err = build_curseg(sbi);
5187 if (err)
5188 return err;
5189
5190
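	/* reinit free segmap based on SIT */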
5191 err = build_sit_entries(sbi);
5192 if (err)
5193 return err;
5194
5195 init_free_segmap(sbi);
5196 err = build_dirty_segmap(sbi);
5197 if (err)
5198 return err;
5199
5200 err = sanity_check_curseg(sbi);
5201 if (err)
5202 return err;
5203
5204 init_min_max_mtime(sbi);
5205 return 0;
5206}
5207
5208static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
5209 enum dirty_type dirty_type)
5210{
5211 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5212
5213 mutex_lock(&dirty_i->seglist_lock);
5214 kvfree(dirty_i->dirty_segmap[dirty_type]);
5215 dirty_i->nr_dirty[dirty_type] = 0;
5216 mutex_unlock(&dirty_i->seglist_lock);
5217}
5218
5219static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
5220{
5221 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5222
5223 kvfree(dirty_i->victim_secmap);
5224}
5225
5226static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
5227{
5228 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
5229 int i;
5230
5231 if (!dirty_i)
5232 return;
5233
5234
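	/* discard pre-free/dirty segments list */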
5235 for (i = 0; i < NR_DIRTY_TYPE; i++)
5236 discard_dirty_segmap(sbi, i);
5237
5238 if (__is_large_section(sbi)) {
5239 mutex_lock(&dirty_i->seglist_lock);
5240 kvfree(dirty_i->dirty_secmap);
5241 mutex_unlock(&dirty_i->seglist_lock);
5242 }
5243
5244 destroy_victim_secmap(sbi);
5245 SM_I(sbi)->dirty_info = NULL;
5246 kfree(dirty_i);
5247}
5248
5249static void destroy_curseg(struct f2fs_sb_info *sbi)
5250{
5251 struct curseg_info *array = SM_I(sbi)->curseg_array;
5252 int i;
5253
5254 if (!array)
5255 return;
5256 SM_I(sbi)->curseg_array = NULL;
5257 for (i = 0; i < NR_CURSEG_TYPE; i++) {
5258 kfree(array[i].sum_blk);
5259 kfree(array[i].journal);
5260 }
5261 kfree(array);
5262}
5263
5264static void destroy_free_segmap(struct f2fs_sb_info *sbi)
5265{
5266 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
5267
5268 if (!free_i)
5269 return;
5270 SM_I(sbi)->free_info = NULL;
5271 kvfree(free_i->free_segmap);
5272 kvfree(free_i->free_secmap);
5273 kfree(free_i);
5274}
5275
5276static void destroy_sit_info(struct f2fs_sb_info *sbi)
5277{
5278 struct sit_info *sit_i = SIT_I(sbi);
5279
5280 if (!sit_i)
5281 return;
5282
5283 if (sit_i->sentries)
5284 kvfree(sit_i->bitmap);
5285 kfree(sit_i->tmp_map);
5286
5287 kvfree(sit_i->sentries);
5288 kvfree(sit_i->sec_entries);
5289 kvfree(sit_i->dirty_sentries_bitmap);
5290
5291 SM_I(sbi)->sit_info = NULL;
5292 kvfree(sit_i->sit_bitmap);
5293#ifdef CONFIG_F2FS_CHECK_FS
5294 kvfree(sit_i->sit_bitmap_mir);
5295 kvfree(sit_i->invalid_segmap);
5296#endif
5297 kfree(sit_i);
5298}
5299
5300void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
5301{
5302 struct f2fs_sm_info *sm_info = SM_I(sbi);
5303
5304 if (!sm_info)
5305 return;
5306 f2fs_destroy_flush_cmd_control(sbi, true);
5307 destroy_discard_cmd_control(sbi);
5308 destroy_dirty_segmap(sbi);
5309 destroy_curseg(sbi);
5310 destroy_free_segmap(sbi);
5311 destroy_sit_info(sbi);
5312 sbi->sm_info = NULL;
5313 kfree(sm_info);
5314}
5315
5316int __init f2fs_create_segment_manager_caches(void)
5317{
5318 discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
5319 sizeof(struct discard_entry));
5320 if (!discard_entry_slab)
5321 goto fail;
5322
5323 discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
5324 sizeof(struct discard_cmd));
5325 if (!discard_cmd_slab)
5326 goto destroy_discard_entry;
5327
5328 sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
5329 sizeof(struct sit_entry_set));
5330 if (!sit_entry_set_slab)
5331 goto destroy_discard_cmd;
5332
5333 inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
5334 sizeof(struct inmem_pages));
5335 if (!inmem_entry_slab)
5336 goto destroy_sit_entry_set;
5337 return 0;
5338
5339destroy_sit_entry_set:
5340 kmem_cache_destroy(sit_entry_set_slab);
5341destroy_discard_cmd:
5342 kmem_cache_destroy(discard_cmd_slab);
5343destroy_discard_entry:
5344 kmem_cache_destroy(discard_entry_slab);
5345fail:
5346 return -ENOMEM;
5347}
5348
5349void f2fs_destroy_segment_manager_caches(void)
5350{
5351 kmem_cache_destroy(sit_entry_set_slab);
5352 kmem_cache_destroy(discard_cmd_slab);
5353 kmem_cache_destroy(discard_entry_slab);
5354 kmem_cache_destroy(inmem_entry_slab);
5355}
5356