/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */

	/* Account for isolated anon and file pages */
	unsigned long nr_anon;
	unsigned long nr_file;

	unsigned int order;		/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;

	int compact_mode;
};

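/* Put isolated free pages back to the allocator and return how many there were */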
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

/* Isolate free pages onto a private freelist. Must hold zone->lock */
static unsigned long isolate_freepages_block(struct zone *zone,
				unsigned long blockpfn,
				struct list_head *freelist)
{
	unsigned long zone_end_pfn, end_pfn;
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;

	/* Get the last PFN we should scan for free pages at */
	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);

	/* Find the first usable PFN in the block to initialise the page cursor */
	for (; blockpfn < end_pfn; blockpfn++) {
		if (pfn_valid_within(blockpfn))
			break;
	}
	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. This assumes the block is valid */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		if (!pfn_valid_within(blockpfn))
			continue;
		nr_scanned++;

		if (!PageBuddy(page))
			continue;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
		}
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
	return total_isolated;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	int migratetype = get_pageblock_migratetype(page);

	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
		return false;

	/* If the page is a large free page, then allow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return true;

	/* If the block is MIGRATE_MOVABLE, allow migration */
	if (migratetype == MIGRATE_MOVABLE)
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long high_pfn, low_pfn, pfn;
	unsigned long flags;
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

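	/*
	 * The free scanner works from cc->free_pfn downwards but never below
	 * low_pfn, which is kept one pageblock above the migrate scanner so
	 * the two scanners never work within the same pageblock.
	 */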
	pfn = cc->free_pfn;
	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
	high_pfn = low_pfn;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
					pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		if (!pfn_valid(pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(zone, pfn, freelist);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated)
			high_pfn = max(high_pfn, pfn);
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	/* split_free_page does not map the pages */
	list_for_each_entry(page, freelist, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}

	cc->free_pfn = high_pfn;
	cc->nr_freepages = nr_freepages;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[NR_LRU_LISTS] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru) {
		int lru = page_lru_base_type(page);
		count[lru]++;
	}

	cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
	cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static unsigned long isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return 0;
	}

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		struct page *page;
		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/* Get the page and skip it if free */
		page = pfn_to_page(low_pfn);
		if (PageBuddy(page))
			continue;

		/*
		 * For async migration, also only scan in MOVABLE blocks. Async
		 * migration is optimistic to see if the minimum amount of work
		 * satisfies the allocation
		 */
		pageblock_nr = low_pfn >> pageblock_order;
		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
			low_pfn += pageblock_nr_pages;
			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
			last_pageblock_nr = pageblock_nr;
			continue;
		}

		if (!PageLRU(page))
			continue;

		/*
		 * PageLRU is set, and lru_lock excludes isolation,
		 * splitting and collapsing (collapsing has already
		 * happened if PageLRU is set).
		 */
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/* Try isolate the page */
		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
			continue;

		VM_BUG_ON(PageTransCompound(page));

		/* Successfully isolated */
		del_page_from_lru_list(zone, page, page_lru(page));
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	acct_isolated(zone, cc);

	spin_unlock_irq(&zone->lru_lock);
	cc->migrate_pfn = low_pfn;

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	return cc->nr_migratepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the list of isolated free pages held in compact_control.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * We cannot control nr_migratepages and nr_freepages fully when migration is
 * running as migrate_pages() has no knowledge of compact_control. So we
 * recount the pages left on the lists afterwards to keep the counters
 * accurate.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	int nr_freepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;
	list_for_each_entry(page, &cc->freepages, lru)
		nr_freepages++;

	cc->nr_migratepages = nr_migratepages;
	cc->nr_freepages = nr_freepages;
}

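/* Decide whether this compaction run is finished, and with what result */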
static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn)
		return COMPACT_COMPLETE;

	/* Compaction run is not finished if the watermark is not met */
	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
		watermark = low_wmark_pages(zone);
	else
		watermark = high_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Generating only one page of the right order is not enough
	 * for kswapd, we must continue until we're above the high
	 * watermark as a pool for high order GFP_ATOMIC allocations
	 * too.
	 */
	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		/* Job done if page is free of the right migratetype */
		if (!list_empty(&zone->free_area[order].free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (order >= pageblock_order && zone->free_area[order].nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1 implies allocations might succeed depending on watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

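/* Compact a single zone, moving movable pages towards the end of the zone */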
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/* Setup to move all movable pages to the end of the zone */
	cc->migrate_pfn = zone->zone_start_pfn;
	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
	cc->free_pfn &= ~(pageblock_nr_pages-1);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;

		if (!isolate_migratepages(zone, cc))
			continue;

		nr_migrate = cc->nr_migratepages;
		migrate_pages(&cc->migratepages, compaction_alloc,
				(unsigned long)cc, false,
				cc->sync);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		count_vm_event(COMPACTBLOCKS);
		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
		if (nr_remaining)
			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release LRU pages not migrated */
		if (!list_empty(&cc->migratepages)) {
			putback_lru_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
		}

	}

	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	return ret;
}

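/* Set up a compact_control for a single zone and run compaction on it */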
unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync,
				 int compact_mode)
{
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
		.compact_mode = compact_mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	return compact_zone(zone, &cc);
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;

	/*
	 * Check whether it is worth even starting compaction. The order check
	 * is made because an assumption is made that the page allocator can
	 * satisfy the "cheaper" orders without taking special steps.
	 */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_vm_event(COMPACTSTALL);

	/* Compact each zone in the zonelist */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
					    COMPACT_MODE_DIRECT_RECLAIM);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
			break;
	}

	return rc;
}


/* Compact all zones within a node */
static int compact_node(int nid)
{
	int zoneid;
	pg_data_t *pgdat;
	struct zone *zone;

	if (nid < 0 || nid >= nr_node_ids || !node_online(nid))
		return -EINVAL;
	pgdat = NODE_DATA(nid);

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct compact_control cc = {
			.nr_freepages = 0,
			.nr_migratepages = 0,
			.order = -1,
			.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
		};

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;
		INIT_LIST_HEAD(&cc.freepages);
		INIT_LIST_HEAD(&cc.migratepages);

		compact_zone(zone, &cc);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	return 0;
}

/* Compact all nodes in the system */
static int compact_nodes(void)
{
	int nid;

	for_each_online_node(nid)
		compact_node(nid);

	return COMPACT_COMPLETE;
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return compact_nodes();

	return 0;
}

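/* Handler for the extfrag_threshold sysctl consulted by compaction_suitable() */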
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
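/* Compact a single node when its "compact" sysfs attribute is written to */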
ssize_t sysfs_compact_node(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t count)
{
	compact_node(dev->id);

	return count;
}
static SYSDEV_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return sysdev_create_file(&node->sysdev, &attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return sysdev_remove_file(&node->sysdev, &attr_compact);
}
#endif