Lines Matching full:zone
61 * the "fragmentation score" of a node/zone.
160 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
162 zone->compact_considered = 0; in defer_compaction()
163 zone->compact_defer_shift++; in defer_compaction()
165 if (order < zone->compact_order_failed) in defer_compaction()
166 zone->compact_order_failed = order; in defer_compaction()
168 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
169 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
171 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
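The lines above are the whole deferral producer: each compaction failure doubles the number of future requests to skip, capped at 1 << COMPACT_MAX_DEFER_SHIFT (the shift is 6 in mainline). A minimal userspace sketch of that bookkeeping, using a simplified stand-in struct rather than the kernel's struct zone:

#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* mainline value */

/* Simplified stand-in for the deferral fields of struct zone. */
struct zone_defer {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Each failure doubles the backoff, capped at 1 << 6 = 64 requests. */
static void defer_compaction(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;

	if (order < z->compact_order_failed)
		z->compact_order_failed = order;

	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

int main(void)
{
	struct zone_defer z = { 0, 0, 10 };

	for (int fail = 1; fail <= 8; fail++) {
		defer_compaction(&z, 3);
		/* compaction_deferred() swallows (1 << shift) - 1 requests. */
		printf("failure %d: skip the next %lu requests\n",
		       fail, (1UL << z.compact_defer_shift) - 1);
	}
	return 0;
}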
175 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
177 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
179 if (order < zone->compact_order_failed) in compaction_deferred()
183 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
184 zone->compact_considered = defer_limit; in compaction_deferred()
188 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
198 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
202 zone->compact_considered = 0; in compaction_defer_reset()
203 zone->compact_defer_shift = 0; in compaction_defer_reset()
205 if (order >= zone->compact_order_failed) in compaction_defer_reset()
206 zone->compact_order_failed = order + 1; in compaction_defer_reset()
208 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
212 bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
214 if (order < zone->compact_order_failed) in compaction_restarting()
217 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
218 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
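Together with defer_compaction() above, these helpers form a small backoff state machine: compaction_deferred() consumes the budget, compaction_defer_reset() clears it on success, and compaction_restarting() detects a saturated, fully consumed backoff. A sketch of the consumer side, reusing struct zone_defer and COMPACT_MAX_DEFER_SHIFT from the previous sketch and dropping the alloc_success flag the kernel's reset helper takes:

#include <stdbool.h>

static bool compaction_deferred(struct zone_defer *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	/* Orders below the last order that failed are never deferred. */
	if (order < z->compact_order_failed)
		return false;

	/* Swallow defer_limit - 1 requests, then let one through. */
	if (++z->compact_considered >= defer_limit) {
		z->compact_considered = defer_limit;
		return false;
	}
	return true;
}

/* On success, clear the backoff and remember that this order worked. */
static void compaction_defer_reset(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift = 0;
	if (order >= z->compact_order_failed)
		z->compact_order_failed = order + 1;
}

/* True only once the backoff has both saturated and been consumed. */
static bool compaction_restarting(struct zone_defer *z, int order)
{
	if (order < z->compact_order_failed)
		return false;

	return z->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
	       z->compact_considered >= 1UL << z->compact_defer_shift;
}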
231 static void reset_cached_positions(struct zone *zone) in reset_cached_positions() argument
233 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
234 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
235 zone->compact_cached_free_pfn = in reset_cached_positions()
236 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
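reset_cached_positions() pins the migrate scanner to the zone's first pfn and the free scanner to the start of the zone's last pageblock. A sketch of the rounding involved, assuming order-9 (512-page) pageblocks as on x86-64 with 4K pages; the kernel macro is equivalent to ALIGN_DOWN(pfn, pageblock_nr_pages):

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumption: order-9 pageblocks */

static unsigned long pageblock_start_pfn(unsigned long pfn)
{
	return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
	unsigned long zone_start_pfn = 4096;	    /* hypothetical zone */
	unsigned long zone_end_pfn = 1048576 + 100; /* ends mid-pageblock */

	/* Migrate scanner restarts at the bottom of the zone... */
	printf("migrate scanner: %lu\n", zone_start_pfn);
	/* ...free scanner at the first pfn of the last whole pageblock. */
	printf("free scanner:    %lu\n", pageblock_start_pfn(zone_end_pfn - 1));
	return 0;
}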
258 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, in __reset_isolation_pfn() argument
268 if (zone != page_zone(page)) in __reset_isolation_pfn()
288 /* Ensure the start of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
290 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
297 /* Ensure the end of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
299 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
334 static void __reset_isolation_suitable(struct zone *zone) in __reset_isolation_suitable() argument
336 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
337 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
343 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
346 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
349 * Walk the zone and update pageblock skip information. Source looks in __reset_isolation_suitable()
359 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && in __reset_isolation_suitable()
363 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
364 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
365 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
369 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && in __reset_isolation_suitable()
373 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
374 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
380 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
381 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
382 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
391 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable() local
392 if (!populated_zone(zone)) in reset_isolation_suitable()
396 if (zone->compact_blockskip_flush) in reset_isolation_suitable()
397 __reset_isolation_suitable(zone); in reset_isolation_suitable()
426 struct zone *zone = cc->zone; in update_cached_migrate() local
434 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
435 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
437 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
438 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
448 struct zone *zone = cc->zone; in update_pageblock_skip() local
459 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
460 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
584 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
619 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
656 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
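These lines show the free scanner's lock discipline: zone->lock is taken lazily, only once a candidate page is found, and compact_unlock_should_abort() drops it at COMPACT_CLUSTER_MAX (32-pfn) intervals so irq-off hold times stay bounded. A userspace analogue of the pattern using a pthread mutex; page_looks_free() is a hypothetical stand-in for the unlocked PageBuddy() peek:

#include <pthread.h>
#include <stdbool.h>

#define COMPACT_CLUSTER_MAX 32	/* kernel value: SWAP_CLUSTER_MAX */

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for the unlocked PageBuddy() check. */
static bool page_looks_free(unsigned long pfn)
{
	return (pfn % 7) == 0;
}

static void scan_range(unsigned long start, unsigned long end)
{
	bool locked = false;

	for (unsigned long pfn = start; pfn < end; pfn++) {
		/* Periodically drop the lock so hold times stay short. */
		if (locked && !(pfn % COMPACT_CLUSTER_MAX)) {
			pthread_mutex_unlock(&zone_lock);
			locked = false;
		}

		if (!page_looks_free(pfn))
			continue;

		/* Take the lock lazily, only when there is work to do. */
		if (!locked) {
			pthread_mutex_lock(&zone_lock);
			locked = true;
		}

		/* ... re-check and isolate the page under the lock ... */
	}

	if (locked)
		pthread_mutex_unlock(&zone_lock);
}

int main(void)
{
	scan_range(0, 512);
	return 0;
}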
691 * Non-free pages, invalid PFNs, or zone boundaries within the
708 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
709 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
732 block_end_pfn, cc->zone)) in isolate_freepages_range()
828 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
924 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1144 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1145 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1155 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1200 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1287 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1288 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1290 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1349 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1374 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1384 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1395 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1441 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1466 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1467 cc->zone); in fast_isolate_freepages()
1474 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1476 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1494 struct zone *zone = cc->zone; in isolate_freepages() local
1510 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1511 * zone when isolating for the first time. For looping we also need in isolate_freepages()
1515 * zone which ends in the middle of a pageblock. in isolate_freepages()
1522 zone_end_pfn(zone)); in isolate_freepages()
1538 * This can iterate a massively long zone without finding any in isolate_freepages()
1545 zone); in isolate_freepages()
1631 * freelist. All pages on the freelist are from the same zone, so there is no
1703 * If the migrate_pfn is not at the start of a zone or the start in fast_find_migrateblock()
1707 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1734 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
1741 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1749 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
1775 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
1776 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
1783 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
1816	 * Start at where we last stopped, or the beginning of the zone as in isolate_migratepages()
1822 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
1823 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
1846 * This can potentially iterate a massively long zone with in isolate_migratepages()
1854 block_end_pfn, cc->zone); in isolate_migratepages()
1918	 * A zone's fragmentation score is the external fragmentation wrt the
1921 static unsigned int fragmentation_score_zone(struct zone *zone) in fragmentation_score_zone() argument
1923 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); in fragmentation_score_zone()
1927	 * A zone's weighted fragmentation score is the external fragmentation
1928	 * wrt the COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
1936 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) in fragmentation_score_zone_weighted() argument
1940 score = zone->present_pages * fragmentation_score_zone(zone); in fragmentation_score_zone_weighted()
1941 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone_weighted()
1957 struct zone *zone; in fragmentation_score_node() local
1959 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
1960 score += fragmentation_score_zone_weighted(zone); in fragmentation_score_node()
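So a node's score is the size-weighted sum of its zones' external fragmentation at COMPACTION_HPAGE_ORDER, with the +1 in the divisor guarding against an empty node. A worked sketch with made-up zone sizes and extfrag percentages (in the kernel the latter come from extfrag_for_order()):

#include <stdio.h>

struct zone_frag {
	unsigned long present_pages;
	unsigned int extfrag;	/* external fragmentation, percent */
};

int main(void)
{
	/* Hypothetical node: a small DMA32 zone and a large Normal zone. */
	struct zone_frag zones[] = {
		{ .present_pages = 262144,  .extfrag = 90 },
		{ .present_pages = 3932160, .extfrag = 20 },
	};
	unsigned long node_present = 262144 + 3932160;
	unsigned long long node_score = 0;

	for (int i = 0; i < 2; i++) {
		/* Each zone contributes in proportion to its size. */
		unsigned long long score =
			(unsigned long long)zones[i].present_pages *
			zones[i].extfrag;
		node_score += score / (node_present + 1);
	}
	/* ~5 from DMA32 plus ~18 from Normal: the big zone dominates. */
	printf("node fragmentation score: %llu\n", node_score);
	return 0;
}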
1999 reset_cached_positions(cc->zone); in __compact_finished()
2008 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2020 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2024 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2050 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2105 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2113 * compaction_suitable: Is this suitable to run compaction on this zone now?
2119 static enum compact_result __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2129 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in __compaction_suitable()
2134 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, in __compaction_suitable()
2153 low_wmark_pages(zone) : min_wmark_pages(zone); in __compaction_suitable()
2155 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, in __compaction_suitable()
2162 enum compact_result compaction_suitable(struct zone *zone, int order, in compaction_suitable() argument
2169 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, in compaction_suitable()
2170 zone_page_state(zone, NR_FREE_PAGES)); in compaction_suitable()
2188 fragindex = fragmentation_index(zone, order); in compaction_suitable()
2193 trace_mm_compaction_suitable(zone, order, ret); in compaction_suitable()
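The watermark checks above gate compaction on free memory: __compaction_suitable() requires order-0 pages above a watermark plus compact_gap(order), which is 2UL << order in mm/internal.h, leaving room for both the isolated source pages and their migration targets. A sketch of that gate with hypothetical numbers:

#include <stdbool.h>
#include <stdio.h>

/* Room for the isolated source pages plus their destinations. */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

static bool suitable(unsigned long free_pages, unsigned long watermark,
		     unsigned int order)
{
	return free_pages >= watermark + compact_gap(order);
}

int main(void)
{
	unsigned long min_wmark = 1024;	/* hypothetical zone watermark */

	/* order 3 needs 1024 + 16 free pages; order 9 needs 1024 + 1024. */
	printf("order 3, 1100 free: %s\n",
	       suitable(1100, min_wmark, 3) ? "compact" : "skip");
	printf("order 9, 1100 free: %s\n",
	       suitable(1100, min_wmark, 9) ? "compact" : "skip");
	return 0;
}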
2203 struct zone *zone; in compaction_zonelist_suitable() local
2207 * Make sure at least one zone would pass __compaction_suitable if we continue in compaction_zonelist_suitable()
2210 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2221 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2222 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in compaction_zonelist_suitable()
2223 compact_result = __compaction_suitable(zone, order, alloc_flags, in compaction_zonelist_suitable()
2236 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2237 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2243 * These counters track activities during zone compaction. Initialize in compact_zone()
2244 * them before compacting a new zone. in compact_zone()
2254 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, in compact_zone()
2267 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2268 __reset_isolation_suitable(cc->zone); in compact_zone()
2271	 * Set up to move all movable pages to the end of the zone. Use cached in compact_zone()
2273 * want to compact the whole zone), but check that it is initialised in compact_zone()
2274 * by ensuring the values are within zone boundaries. in compact_zone()
2281 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2282 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2285 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2289 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2290 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2293 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2308 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2342 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2343 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2403 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2430 * already reset to zone end in compact_finished() in compact_zone()
2432 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2433 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2445 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2455 .zone = zone, in compact_zone_order()
2513 struct zone *zone; in try_to_compact_pages() local
2525 /* Compact each zone in the list */ in try_to_compact_pages()
2526 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2531 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2536 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2543 * We think the allocation will succeed in this zone, in try_to_compact_pages()
2546 * succeeds in this zone. in try_to_compact_pages()
2548 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2556 * We think that allocation won't succeed in this zone in try_to_compact_pages()
2560 defer_compaction(zone, order); in try_to_compact_pages()
2576 * Compact all zones within a node till each zone's fragmentation score
2582 * per-zone locks.
2587 struct zone *zone; in proactive_compact_node() local
2598 zone = &pgdat->node_zones[zoneid]; in proactive_compact_node()
2599 if (!populated_zone(zone)) in proactive_compact_node()
2602 cc.zone = zone; in proactive_compact_node()
2616 struct zone *zone; in compact_node() local
2628 zone = &pgdat->node_zones[zoneid]; in compact_node()
2629 if (!populated_zone(zone)) in compact_node()
2632 cc.zone = zone; in compact_node()
2738 struct zone *zone; in kcompactd_node_suitable() local
2742 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
2744 if (!populated_zone(zone)) in kcompactd_node_suitable()
2747 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, in kcompactd_node_suitable()
2762 struct zone *zone; in kcompactd_do_work() local
2778 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
2779 if (!populated_zone(zone)) in kcompactd_do_work()
2782 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
2785 if (compaction_suitable(zone, cc.order, 0, zoneid) != in kcompactd_do_work()
2792 cc.zone = zone; in kcompactd_do_work()
2796 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
2800 * otherwise coalesce on the zone's free area for in kcompactd_do_work()
2804 drain_all_pages(zone); in kcompactd_do_work()
2810 defer_compaction(zone, cc.order); in kcompactd_do_work()