Lines matching full:zone (every line where "zone" appears as a whole identifier)

13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
103 * shuffle the whole zone).
123 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
147 struct zone *zone; member
377 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
440 * prev_end_pfn static that contains the end of previous zone in defer_init()
609 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
617 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
618 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
619 sp = zone->spanned_pages; in page_outside_zone_boundaries()
620 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
622 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
625 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
626 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
632 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
636 if (zone != page_zone(page)) in page_is_consistent()
642 * Temporary debugging check for pages not lying within a given zone.
644 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
646 if (page_outside_zone_boundaries(zone, page)) in bad_range()
648 if (!page_is_consistent(zone, page)) in bad_range()
654 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
771 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
784 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
789 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
799 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
802 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
804 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
871 * (d) a page and its buddy are in the same zone.
874 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
888 * zone check is done late to avoid uselessly calculating in page_is_buddy()
889 * zone/node ids for pages that could never merge. in page_is_buddy()
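The page_is_buddy() comments above spell out when two pages may merge and why the zone check is left until last. As a rough illustration of the PFN arithmetic behind that test (a minimal standalone sketch, not the kernel's own helpers), the buddy of an order-n block is the block whose PFN differs only in bit n, and the merged block starts at the lower of the two PFNs:

#include <stdio.h>

/* Minimal sketch of binary-buddy PFN arithmetic; names are illustrative. */
static unsigned long buddy_pfn_of(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);          /* flip bit `order` */
}

static unsigned long merged_pfn_of(unsigned long pfn, unsigned long buddy_pfn)
{
	return pfn & buddy_pfn;               /* lower of the two PFNs */
}

int main(void)
{
	unsigned long pfn = 0x1234;
	unsigned int order = 3;
	unsigned long buddy = buddy_pfn_of(pfn, order);

	printf("pfn 0x%lx order %u -> buddy 0x%lx, merged block at 0x%lx\n",
	       pfn, order, buddy, merged_pfn_of(pfn, buddy));
	return 0;
}

The order and PageBuddy checks are cheap and run first; only a candidate that passes them is worth the zone/node lookup, which is why the comment above describes the zone check as done late.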
900 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
907 capc->cc->zone == zone ? capc : NULL; in task_capc()
936 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
950 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
953 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
960 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
963 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
974 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
977 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
982 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
992 zone->free_area[order].nr_free--; in del_page_from_free_list()
1051 struct zone *zone, unsigned int order, in __free_one_page() argument
1054 struct capture_control *capc = task_capc(zone); in __free_one_page()
1063 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1068 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1071 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1076 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1092 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1094 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1109 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1136 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1138 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1426 * Assumes all pages on list are in same zone, and of same order.
1429 * If the zone was previously in an "all pages pinned" state then look to
1432 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1435 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1485 * under zone->lock. It is believed the overhead of in free_pcppages_bulk()
1496 spin_lock(&zone->lock); in free_pcppages_bulk()
1497 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1511 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1514 spin_unlock(&zone->lock); in free_pcppages_bulk()
1517 static void free_one_page(struct zone *zone, in free_one_page() argument
1522 spin_lock(&zone->lock); in free_one_page()
1523 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1527 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1528 spin_unlock(&zone->lock); in free_one_page()
1532 unsigned long zone, int nid, in __init_single_page() argument
1541 set_page_links(page, zone, nid, pfn); in __init_single_page()
1550 if (!is_highmem_idx(zone)) in __init_single_page()
1568 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1570 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1712 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1721 * belong to a single zone. We assume that a border between node0 and node1
1728 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1743 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1755 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1757 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1761 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1765 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1768 block_end_pfn, zone)) in set_zone_contiguous()
1774 zone->contiguous = true; in set_zone_contiguous()
1777 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1779 zone->contiguous = false; in clear_zone_contiguous()
1868 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1873 int nid = zone_to_nid(zone); in deferred_init_pages()
1875 int zid = zone_idx(zone); in deferred_init_pages()
1894 * This function is meant to pre-load the iterator for the zone init.
1900 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1907 * Start out by walking through the ranges in this zone that have in deferred_init_mem_pfn_range_in_zone()
1911 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1934 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1943 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1950 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1961 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1982 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1985 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1992 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
2012 struct zone *zone; in deferred_init_memmap() local
2034 * Once we unlock here, the zone cannot be grown anymore, thus if an in deferred_init_memmap()
2035 * interrupt thread must allocate this early in boot, zone must be in deferred_init_memmap()
2040 /* Only the highest zone is deferred so find it */ in deferred_init_memmap()
2042 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2043 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
2047 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_init_memmap()
2048 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2058 .fn_arg = zone, in deferred_init_memmap()
2067 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2071 /* Sanity check that the next zone really is unpopulated */ in deferred_init_memmap()
2072 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2082 * If this zone has deferred pages, try to grow it by initializing enough
2088 * Return true when zone was grown, otherwise return false. We return true even
2097 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2100 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2106 /* Only the last zone may have deferred pages */ in deferred_grow_zone()
2107 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2113 * If someone grew this zone while we were waiting for spinlock, return in deferred_grow_zone()
2121 /* If the zone is empty somebody else may have cleared out the zone */ in deferred_grow_zone()
2122 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2139 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2164 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2166 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2173 struct zone *zone; in page_alloc_init_late() local
2192 for_each_populated_zone(zone) in page_alloc_init_late()
2193 zone_pcp_update(zone); in page_alloc_init_late()
2211 for_each_populated_zone(zone) in page_alloc_init_late()
2212 set_zone_contiguous(zone); in page_alloc_init_late()
2261 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2269 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2277 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2280 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2421 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2430 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2434 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2435 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
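The __rmqueue_smallest()/expand() hits just above are the split path: the allocator takes the smallest free block whose order is at least the request and returns the unused remainder to the free lists one order at a time. A toy walk-through of that split (plain C, not kernel code) for an order-2 request served from an order-5 block:

#include <stdio.h>

int main(void)
{
	unsigned int low = 2, high = 5;       /* request 4 pages from a 32-page block */
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("free an order-%u block (%lu pages) back onto its list\n",
		       high, size);
	}
	printf("hand an order-%u block (%lu pages) to the caller\n", low, size);
	return 0;
}

This prints one freed block each of order 4, 3, and 2 and keeps the final order-2 block for the caller, mirroring how expand() halves the surplus at every step.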
2461 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2464 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2467 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2476 static int move_freepages(struct zone *zone, in move_freepages() argument
2505 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2506 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2509 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2517 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2532 /* Do not cross zone boundaries */ in move_freepages_block()
2533 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2535 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2538 return move_freepages(zone, start_page, end_page, migratetype, in move_freepages_block()
2586 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2598 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2601 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2617 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
2631 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2658 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2659 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2665 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2689 /* moving whole block can fail due to zone boundary conditions */ in steal_suitable_fallback()
2704 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2748 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2755 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. in reserve_highatomic_pageblock()
2758 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2759 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2762 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2765 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2772 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2774 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2778 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2796 struct zone *zone; in unreserve_highatomic_pageblock() local
2801 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2807 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2811 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2813 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2834 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2836 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2849 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2852 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2856 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2873 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2898 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2924 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2940 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2952 * Call me with the zone->lock already held.
2955 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2961 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2963 if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2972 static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma() argument
2976 struct page *page = __rmqueue_cma_fallback(zone, order); in __rmqueue_cma()
2981 static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma() argument
2994 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
3000 spin_lock(&zone->lock); in rmqueue_bulk()
3005 page = __rmqueue_cma(zone, order, migratetype, in rmqueue_bulk()
3008 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_bulk()
3029 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
3039 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3040 spin_unlock(&zone->lock); in rmqueue_bulk()
3049 static struct list_head *get_populated_pcp_list(struct zone *zone, in get_populated_pcp_list() argument
3056 pcp->count += rmqueue_bulk(zone, order, in get_populated_pcp_list()
3075 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
3084 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
3090 * Drain pcplists of the indicated processor and zone.
3096 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
3103 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
3107 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3120 struct zone *zone; in drain_pages() local
3122 for_each_populated_zone(zone) { in drain_pages()
3123 drain_pages_zone(cpu, zone); in drain_pages()
3130 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3131 * the single zone's pages.
3133 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3137 if (zone) in drain_local_pages()
3138 drain_pages_zone(cpu, zone); in drain_local_pages()
3157 drain_local_pages(drain->zone); in drain_local_pages_wq()
3164 * When zone parameter is non-NULL, spill just the single zone's pages.
3168 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3187 * a zone. Such callers are primarily CMA and memory hotplug and need in drain_all_pages()
3191 if (!zone) in drain_all_pages()
3204 struct zone *z; in drain_all_pages()
3207 if (zone) { in drain_all_pages()
3208 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
3230 drain->zone = zone; in drain_all_pages()
3247 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3254 if (zone_is_empty(zone)) in mark_free_pages()
3257 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3259 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3260 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3269 if (page_zone(page) != zone) in mark_free_pages()
3278 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3291 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3309 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3329 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3336 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3341 free_pcppages_bulk(zone, batch, pcp); in free_unref_page_commit()
3424 struct zone *zone; in __isolate_free_page() local
3429 zone = page_zone(page); in __isolate_free_page()
3439 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3440 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3443 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3448 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3480 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3482 /* zone lock should be held when this function is called */ in __putback_isolated_page()
3483 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3486 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3495 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3518 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3530 list = get_populated_pcp_list(zone, 0, pcp, in __rmqueue_pcplist()
3539 list = get_populated_pcp_list(zone, 0, pcp, in __rmqueue_pcplist()
3555 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3556 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3564 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3565 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, in rmqueue_pcplist()
3569 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3576 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3579 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3580 struct zone *zone, unsigned int order, in rmqueue() argument
3588 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3598 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3609 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3616 page = __rmqueue_cma(zone, order, migratetype, in rmqueue()
3619 page = __rmqueue(zone, order, migratetype, in rmqueue()
3623 spin_unlock(&zone->lock); in rmqueue()
3626 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3630 zone_statistics(preferred_zone, zone); in rmqueue()
3631 trace_android_vh_rmqueue(preferred_zone, zone, order, in rmqueue()
3637 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3638 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3639 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3642 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3724 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3750 * one free page of a suitable size. Checking now avoids taking the zone lock
3753 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3825 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3833 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3877 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
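The __zone_watermark_ok()/zone_watermark_fast() hits above are the admission test the comment above refers to: checking up front avoids taking zone->lock for a zone that obviously cannot satisfy the request. A simplified paraphrase of that test follows (illustrative names, and ignoring the highatomic and CMA adjustments the kernel applies):

#include <stdbool.h>

#define MAX_ORDER 11    /* default kernel value, assumed here */

struct zone_counters {
	unsigned long free_pages;           /* NR_FREE_PAGES snapshot */
	unsigned long lowmem_reserve;       /* reserve kept for higher-zone callers */
	unsigned long nr_free[MAX_ORDER];   /* free blocks per order */
};

static bool watermark_ok(const struct zone_counters *z, unsigned int order,
			 unsigned long mark)
{
	long free = (long)z->free_pages - ((1L << order) - 1);

	/* The zone must stay above the watermark plus its lowmem reserve. */
	if (free <= (long)(mark + z->lowmem_reserve))
		return false;
	if (order == 0)
		return true;

	/* A high-order request also needs one free block of sufficient order. */
	for (unsigned int o = order; o < MAX_ORDER; o++)
		if (z->nr_free[o])
			return true;
	return false;
}

The real __zone_watermark_ok() additionally walks the per-migratetype free lists and discounts reserved highatomic and (depending on ALLOC_CMA) CMA pages, but the shape of the decision is the same.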
3891 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3893 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3897 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3904 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3905 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3906 * premature use of a lower zone may cause lowmem pressure problems that
3907 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3912 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3923 if (!zone) in alloc_flags_nofragment()
3926 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3931 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3935 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3967 struct zone *zone; in get_page_from_freelist() local
3973 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3978 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3985 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
4007 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
4010 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
4011 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
4017 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
4025 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
4026 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
4032 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
4033 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
4040 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
4041 * grow this zone if it contains deferred pages. in get_page_from_freelist()
4044 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4054 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
4057 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4067 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
4076 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4086 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4091 /* Try again if zone has deferred pages */ in get_page_from_freelist()
4093 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4292 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
4306 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4308 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4309 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4344 * compaction considers all the zone as desperately out of memory in should_compact_retry()
4418 struct zone *zone; in should_compact_retry() local
4430 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4432 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4584 struct zone *zone; in wake_all_kswapds() local
4588 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4590 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4591 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4592 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4695 struct zone *zone; in should_reclaim_retry() local
4724 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4728 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4731 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4732 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4738 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4752 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4863 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
5130 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
5134 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
5174 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); in __alloc_pages_nodemask()
5477 * @offset: The zone index of the highest zone
5480 * high watermark within all zones at or below a given zone index. For each
5481 * zone, the number of pages is calculated as:
5490 struct zone *zone; in nr_free_zone_pages() local
5497 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5498 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5499 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
5522 static inline void show_node(struct zone *zone) in show_node() argument
5525 printk("Node %d ", zone_to_nid(zone)); in show_node()
5535 struct zone *zone; in si_mem_available() local
5541 for_each_zone(zone) in si_mem_available()
5542 wmark_low += low_wmark_pages(zone); in si_mem_available()
5603 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5605 if (is_highmem(zone)) { in si_meminfo_node()
5606 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5607 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5682 struct zone *zone; in show_free_areas() local
5685 for_each_populated_zone(zone) { in show_free_areas()
5686 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5690 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5774 for_each_populated_zone(zone) { in show_free_areas()
5777 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5782 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5784 show_node(zone); in show_free_areas()
5807 zone->name, in show_free_areas()
5808 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5809 K(min_wmark_pages(zone)), in show_free_areas()
5810 K(low_wmark_pages(zone)), in show_free_areas()
5811 K(high_wmark_pages(zone)), in show_free_areas()
5812 K(zone->nr_reserved_highatomic), in show_free_areas()
5813 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5814 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5815 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5816 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5817 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5818 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5819 K(zone->present_pages), in show_free_areas()
5820 K(zone_managed_pages(zone)), in show_free_areas()
5821 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5822 K(zone_page_state(zone, NR_PAGETABLE)), in show_free_areas()
5823 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5825 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5826 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5829 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5833 for_each_populated_zone(zone) { in show_free_areas()
5838 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5840 show_node(zone); in show_free_areas()
5841 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5843 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5845 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5857 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5874 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5876 zoneref->zone = zone; in zoneref_set_zone()
5877 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5881 * Builds allocation fallback zone lists.
5887 struct zone *zone; in build_zonerefs_node() local
5893 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5894 if (populated_zone(zone)) { in build_zonerefs_node()
5895 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5999 * This results in maximum locality--normal zone overflows into local
6000 * DMA zone, if any--but risks exhausting DMA zone.
6018 zonerefs->zone = NULL; in build_zonelists_in_node_order()
6033 zonerefs->zone = NULL; in build_thisnode_zonelists()
6038 * Build zonelists ordered by zone and nodes within zones.
6039 * This results in conserving DMA zone[s] until all Normal memory is
6041 * may still exist in local DMA zone.
6079 * I.e., first node id of first zone in arg node's generic zonelist.
6090 return zone_to_nid(z->zone); in local_memory_node()
6131 zonerefs->zone = NULL; in build_zonelists()
6150 * Other parts of the kernel may not check if the zone is available.
6184 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
6208 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
6246 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
6260 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
6264 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6266 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6270 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6293 * zone stats (e.g., nr_isolate_pageblock) are touched.
6295 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, in memmap_init_zone() argument
6314 if (zone == ZONE_DEVICE) { in memmap_init_zone()
6335 if (overlap_memmap_init(zone, &pfn)) in memmap_init_zone()
6342 __init_single_page(page, pfn, zone, nid, false); in memmap_init_zone()
6360 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6366 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6368 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6372 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6392 * phase for it to be fully associated with a zone. in memmap_init_zone_device()
6428 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6432 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6433 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6452 * - zone and node links point to zone and node that span the page if the
6453 * hole is in the middle of a zone
6454 * - zone and node links point to adjacent zone/node if the hole falls on
6455 * the zone boundary; the pages in such holes will be prepended to the
6456 * zone/node above the hole except for the trailing pages in the last
6457 * section that will be appended to the zone/node below.
6461 int zone, int node) in init_unavailable_range() argument
6472 __init_single_page(pfn_to_page(pfn), pfn, zone, node, true); in init_unavailable_range()
6478 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", in init_unavailable_range()
6479 node, zone_names[zone], pgcnt); in init_unavailable_range()
6482 static void __init memmap_init_zone_range(struct zone *zone, in memmap_init_zone_range() argument
6487 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone_range()
6488 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone_range()
6489 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone_range()
6516 struct zone *zone = node->node_zones + j; in memmap_init() local
6518 if (!populated_zone(zone)) in memmap_init()
6521 memmap_init_zone_range(zone, start_pfn, end_pfn, in memmap_init()
6531 * Append the pages in this hole to the highest zone in the last in memmap_init()
6545 unsigned long zone, in arch_memmap_init() argument
6550 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6557 * size of the zone. in zone_batchsize()
6559 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
6664 static void pageset_set_high_and_batch(struct zone *zone, in pageset_set_high_and_batch() argument
6669 (zone_managed_pages(zone) / in pageset_set_high_and_batch()
6672 pageset_set_batch(pcp, zone_batchsize(zone)); in pageset_set_high_and_batch()
6675 static void __meminit zone_pageset_init(struct zone *zone, int cpu) in zone_pageset_init() argument
6677 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6680 pageset_set_high_and_batch(zone, pcp); in zone_pageset_init()
6683 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6686 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6688 zone_pageset_init(zone, cpu); in setup_zone_pageset()
6698 struct zone *zone; in setup_per_cpu_pageset() local
6701 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6702 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6723 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6730 zone->pageset = &boot_pageset; in zone_pcp_init()
6732 if (populated_zone(zone)) in zone_pcp_init()
6733 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", in zone_pcp_init()
6734 zone->name, zone->present_pages, in zone_pcp_init()
6735 zone_batchsize(zone)); in zone_pcp_init()
6738 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6742 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6743 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6748 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6751 "Initialising map node %d zone %lu pfns %lu -> %lu\n", in init_currently_empty_zone()
6753 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6756 zone_init_free_lists(zone); in init_currently_empty_zone()
6757 zone->initialized = 1; in init_currently_empty_zone()
6790 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6792 * increasing memory addresses so that the "highest" populated zone is used
6811 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6815 * is distributed. This helper function adjusts the zone ranges
6817 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6848 * Return the number of pages a zone spans in a node, including holes
6864 /* Get the start and end of the zone */ in zone_spanned_pages_in_node()
6871 /* Check that this node has pages within the zone's required range */ in zone_spanned_pages_in_node()
6875 /* Move the zone boundaries inside the node if necessary */ in zone_spanned_pages_in_node()
6916 /* Return the number of page frames in holes in a zone on a node */
6975 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
6993 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
6995 zone->zone_start_pfn = 0; in calculate_node_totalpages()
6996 zone->spanned_pages = size; in calculate_node_totalpages()
6997 zone->present_pages = real_size; in calculate_node_totalpages()
7011 * Calculate the size of the zone->blockflags rounded to an unsigned long
7031 struct zone *zone, in setup_usemap() argument
7036 zone->pageblock_flags = NULL; in setup_usemap()
7038 zone->pageblock_flags = in setup_usemap()
7041 if (!zone->pageblock_flags) in setup_usemap()
7042 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", in setup_usemap()
7043 usemapsize, zone->name, pgdat->node_id); in setup_usemap()
7047 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, in setup_usemap() argument
7095 * the zone and SPARSEMEM is in use. If there are holes within the in calc_memmap_size()
7096 * zone, each populated memory region may cost us one or two extra in calc_memmap_size()
7145 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7148 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7149 zone_set_nid(zone, nid); in zone_init_internals()
7150 zone->name = zone_names[idx]; in zone_init_internals()
7151 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7152 spin_lock_init(&zone->lock); in zone_init_internals()
7153 zone_seqlock_init(zone); in zone_init_internals()
7154 zone_pcp_init(zone); in zone_init_internals()
7158 * Set up the zone data structures
7177 * Set up the zone data structures:
7194 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7196 unsigned long zone_start_pfn = zone->zone_start_pfn; in free_area_init_core()
7198 size = zone->spanned_pages; in free_area_init_core()
7199 freesize = zone->present_pages; in free_area_init_core()
7203 * is used by this zone for memmap. This affects the watermark in free_area_init_core()
7212 " %s zone: %lu pages used for memmap\n", in free_area_init_core()
7215 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", in free_area_init_core()
7222 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", in free_area_init_core()
7238 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7244 setup_usemap(pgdat, zone, zone_start_pfn, size); in free_area_init_core()
7245 init_currently_empty_zone(zone, zone_start_pfn, size); in free_area_init_core()
7268 * The zone's endpoints aren't required to be MAX_ORDER in alloc_node_mem_map()
7416 * Sum pages in active regions for movable zone.
7436 * Find the PFN the Movable zone begins in each node. Kernel memory
7661 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7662 if (populated_zone(zone)) { in check_for_memory()
7682 * free_area_init - Initialise all pg_data_t and zone data
7683 * @max_zone_pfn: an array of max PFNs for each zone
7687 * zone in each node and their holes is calculated. If the maximum PFN
7688 * between two adjacent zones match, it is assumed that the zone is empty.
7690 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7697 int i, nid, zone; in free_area_init() local
7700 /* Record where the zone boundaries are */ in free_area_init()
7711 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7713 zone = i; in free_area_init()
7715 if (zone == ZONE_MOVABLE) in free_area_init()
7718 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7719 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7720 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
7729 /* Print out the zone ranges */ in free_area_init()
7730 pr_info("Zone ranges:\n"); in free_area_init()
7747 pr_info("Movable zone start for each node\n"); in free_area_init()
7951 * set_dma_reserve - set the specified number of pages reserved in the first zone
7954 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7955 * In the DMA zone, a significant percentage may be consumed by kernel image
7958 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
8034 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
8036 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
8038 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
8040 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
8041 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
8045 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
8060 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
8062 * pages are left in the zone after a successful __alloc_pages().
8071 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
8073 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
8077 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
8082 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
8084 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
8098 struct zone *zone; in __setup_per_zone_wmarks() local
8102 for_each_zone(zone) { in __setup_per_zone_wmarks()
8103 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
8104 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
8107 for_each_zone(zone) { in __setup_per_zone_wmarks()
8110 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8111 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8113 low = (u64)pages_low * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8115 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
8127 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
8129 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8132 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
8133 * proportionate to the zone's size. in __setup_per_zone_wmarks()
8135 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8144 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
8147 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8148 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + low + tmp; in __setup_per_zone_wmarks()
8149 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + low + tmp * 2; in __setup_per_zone_wmarks()
8151 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
8162 * Ensures that the watermark[min,low,high] values for each zone are set
8271 struct zone *zone; in setup_min_unmapped_ratio() local
8276 for_each_zone(zone) in setup_min_unmapped_ratio()
8277 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8299 struct zone *zone; in setup_min_slab_ratio() local
8304 for_each_zone(zone) in setup_min_slab_ratio()
8305 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8331 * if in function of the boot time zone sizes.
8349 static void __zone_pcp_update(struct zone *zone) in __zone_pcp_update() argument
8354 pageset_set_high_and_batch(zone, in __zone_pcp_update()
8355 per_cpu_ptr(zone->pageset, cpu)); in __zone_pcp_update()
8359 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8360 * cpu. It is the fraction of total pages in each zone that a hot per cpu
8366 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8389 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8390 __zone_pcp_update(zone); in percpu_pagelist_fraction_sysctl_handler()
8549 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8584 * If the zone is movable and we have ruled out all reserved in has_unmovable_pages()
8588 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8702 /* [start, end) must belong to a single zone. */
8715 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8743 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8787 * aligned. The PFN range must belong to a single zone.
8809 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8853 drain_all_pages(cc.zone); in alloc_contig_range()
8883 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
8949 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8975 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8980 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
9009 struct zone *zone; in alloc_contig_pages() local
9013 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
9015 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9017 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
9018 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
9019 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
9021 * We release the zone lock here because in alloc_contig_pages()
9022 * alloc_contig_range() will also lock the zone in alloc_contig_pages()
9027 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
9032 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
9036 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
9057 * The zone indicated has a new number of managed_pages; batch sizes and percpu
9060 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
9063 __zone_pcp_update(zone); in zone_pcp_update()
9067 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
9075 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
9077 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
9078 drain_zonestat(zone, pset); in zone_pcp_reset()
9080 free_percpu(zone->pageset); in zone_pcp_reset()
9081 zone->pageset = &boot_pageset; in zone_pcp_reset()
9088 * All pages in the range must be in a single zone, must not contain holes,
9095 struct zone *zone; in __offline_isolated_pages() local
9100 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
9101 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
9126 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9129 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
9135 struct zone *zone = page_zone(page); in is_free_buddy_page() local
9140 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
9147 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
9157 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9176 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
9180 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
9192 struct zone *zone = page_zone(page); in take_page_off_buddy() local
9198 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9208 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
9209 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
9212 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
9219 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
9230 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma() local
9232 if (managed_zone(zone)) in has_managed_dma()