Lines Matching full:order
226 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
265 static void __free_pages_ok(struct page *page, unsigned int order,
347 * many cases very high-order allocations like THP are likely to be
700 * Higher-order pages are called "compound pages". They are structured thusly:
710 * The first tail page's ->compound_order holds the order of allocation.
711 * This usage means that zero-order pages may not be compound.
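Since the listing only shows fragments of the compound-page comment, here is a hedged, userspace-only model of the idea it describes: a head page followed by (1 << order) - 1 tail pages, with the order recorded once on the first tail. All names below (struct model_page, model_prep_compound) are invented for illustration and are not kernel API.

#include <stdio.h>

struct model_page {
	int is_head;
	int is_tail;
	unsigned int stored_order;	/* meaningful only on the first tail page */
};

static void model_prep_compound(struct model_page *pages, unsigned int order)
{
	unsigned int nr = 1u << order;

	pages[0].is_head = 1;
	for (unsigned int i = 1; i < nr; i++)
		pages[i].is_tail = 1;
	/* mirrors the idea of set_compound_order(): the order lives on page[1],
	 * which is also why an order-0 page (no tail page) cannot be compound */
	pages[1].stored_order = order;
}

int main(void)
{
	struct model_page pages[8] = { 0 };

	model_prep_compound(pages, 3);				/* order 3 => 8 pages */
	printf("stored order: %u\n", pages[1].stored_order);	/* prints 3 */
	return 0;
}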
720 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
723 int nr_pages = 1 << order; in prep_compound_page()
734 set_compound_order(page, order); in prep_compound_page()
772 unsigned int order, int migratetype) in set_page_guard() argument
777 if (order >= debug_guardpage_minorder()) in set_page_guard()
782 set_page_private(page, order); in set_page_guard()
784 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
790 unsigned int order, int migratetype) in clear_page_guard() argument
799 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
803 unsigned int order, int migratetype) { return false; } in set_page_guard() argument
805 unsigned int order, int migratetype) {} in clear_page_guard() argument
811 * order of appearance. So we need to first gather the full picture of what was
859 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
861 set_page_private(page, order); in set_buddy_order()
870 * (c) a page and its buddy have the same order &&
876 * For recording page's order, we use page_private(page).
879 unsigned int order) in page_is_buddy() argument
884 if (buddy_order(buddy) != order) in page_is_buddy()
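As a reading aid for the conditions excerpted above, the sketch below restates them as a standalone predicate. It is a simplified model (plain booleans instead of page flags, an integer zone id), not the kernel's page_is_buddy().

#include <stdbool.h>

struct model_page {
	bool buddy;		/* stands in for PageBuddy() */
	unsigned int order;	/* stands in for the order kept in page_private() */
	int zone_id;
};

static bool model_page_is_buddy(const struct model_page *page,
				const struct model_page *buddy,
				unsigned int order)
{
	if (!buddy->buddy)
		return false;		/* candidate buddy is not a free block */
	if (buddy->order != order)
		return false;		/* condition (c): orders must match */
	return page->zone_id == buddy->zone_id;
}

int main(void)
{
	struct model_page a = { .buddy = false, .order = 2, .zone_id = 0 };
	struct model_page b = { .buddy = true,  .order = 2, .zone_id = 0 };

	return model_page_is_buddy(&a, &b, 2) ? 0 : 1;	/* buddies at order 2 */
}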
912 int order, int migratetype) in compaction_capture() argument
914 if (!capc || order != capc->cc->order) in compaction_capture()
923 * Do not let lower order allocations pollute a movable pageblock. in compaction_capture()

926 * have trouble finding a high-order free page. in compaction_capture()
928 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) in compaction_capture()
943 int order, int migratetype) in compaction_capture() argument
951 unsigned int order, int migratetype) in add_to_free_list() argument
953 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
961 unsigned int order, int migratetype) in add_to_free_list_tail() argument
963 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
975 unsigned int order, int migratetype) in move_to_free_list() argument
977 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
983 unsigned int order) in del_page_from_free_list() argument
992 zone->free_area[order].nr_free--; in del_page_from_free_list()
997 * of the next-highest order is free. If it is, it's possible
1001 * as a higher order page
1005 struct page *page, unsigned int order) in buddy_merge_likely() argument
1010 if (order >= MAX_ORDER - 2) in buddy_merge_likely()
1018 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); in buddy_merge_likely()
1022 page_is_buddy(higher_page, higher_buddy, order + 1); in buddy_merge_likely()
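The heuristic excerpted above rests on a small amount of pfn arithmetic. The standalone sketch below shows it with an invented helper name (find_buddy_pfn mirrors the XOR trick of __find_buddy_pfn()): a block's buddy differs from it only in bit `order` of the pfn, and the merged block starts at the bitwise AND of the two.

#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned int order = 3;
	unsigned long pfn = 0x1230;				/* order-3 aligned pfn */
	unsigned long buddy_pfn = find_buddy_pfn(pfn, order);		/* 0x1238 */
	unsigned long combined_pfn = pfn & buddy_pfn;			/* 0x1230 */
	/* the block whose freeness hints at a further merge one order up */
	unsigned long higher_buddy = find_buddy_pfn(combined_pfn, order + 1);

	printf("buddy=%#lx combined=%#lx higher_buddy=%#lx\n",
	       buddy_pfn, combined_pfn, higher_buddy);
	return 0;
}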
1038 * free pages of length (1 << order) and marked with PageBuddy.
1039 * The page's order is recorded in the page_private(page) field.
1051 struct zone *zone, unsigned int order, in __free_one_page() argument
1068 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1070 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
1074 while (order < max_order) { in __free_one_page()
1075 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
1076 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1080 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
1085 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
1089 * merge with it and move up one order. in __free_one_page()
1092 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1094 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1098 order++; in __free_one_page()
1100 if (order < MAX_ORDER - 1) { in __free_one_page()
1101 /* If we are here, it means order is >= pageblock_order. in __free_one_page()
1107 * low-order merging. in __free_one_page()
1112 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
1121 max_order = order + 1; in __free_one_page()
1126 set_buddy_order(page, order); in __free_one_page()
1130 else if (is_shuffle_order(order)) in __free_one_page()
1133 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1136 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1138 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1142 page_reporting_notify_free(order); in __free_one_page()
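To make the merge loop excerpted from __free_one_page() easier to follow, here is a minimal userspace model of the same idea, assuming a toy free map instead of real free lists: a freed block keeps merging with its free, same-order buddy until no such buddy exists. Nothing below is kernel code; all names are invented.

#include <stdbool.h>
#include <stdio.h>

#define MAX_MODEL_ORDER 4
#define NR_PAGES (1u << MAX_MODEL_ORDER)

static bool free_block[NR_PAGES];	/* true if a free block starts at this pfn */
static unsigned int block_order[NR_PAGES];

static void model_free_one_page(unsigned long pfn, unsigned int order)
{
	while (order < MAX_MODEL_ORDER) {
		unsigned long buddy = pfn ^ (1UL << order);

		if (!free_block[buddy] || block_order[buddy] != order)
			break;			/* buddy busy or split: stop merging */
		free_block[buddy] = false;	/* pull the buddy off its "free list" */
		pfn &= buddy;			/* merged block starts at the lower pfn */
		order++;
	}
	free_block[pfn] = true;
	block_order[pfn] = order;
}

int main(void)
{
	model_free_one_page(0, 0);
	model_free_one_page(1, 0);	/* merges with pfn 0 into an order-1 block */
	printf("order at pfn 0: %u\n", block_order[0]);	/* prints 1 */
	return 0;
}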
1278 unsigned int order, bool check_free, fpi_t fpi_flags) in free_pages_prepare() argument
1285 trace_mm_page_free(page, order); in free_pages_prepare()
1287 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1293 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1294 reset_page_owner(page, order); in free_pages_prepare()
1295 free_page_pinner(page, order); in free_pages_prepare()
1301 * avoid checking PageCompound for order-0 pages. in free_pages_prepare()
1303 if (unlikely(order)) { in free_pages_prepare()
1307 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1311 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1324 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1332 reset_page_owner(page, order); in free_pages_prepare()
1333 free_page_pinner(page, order); in free_pages_prepare()
1337 PAGE_SIZE << order); in free_pages_prepare()
1339 PAGE_SIZE << order); in free_pages_prepare()
1342 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1354 kasan_free_pages(page, order); in free_pages_prepare()
1359 kernel_init_free_pages(page, 1 << order, false); in free_pages_prepare()
1361 kasan_poison_pages(page, order, init); in free_pages_prepare()
1369 arch_free_page(page, order); in free_pages_prepare()
1371 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1378 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1396 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1397 * moving from pcp lists to free list in order to reduce overhead. With
1426 * Assumes all pages on list are in same zone, and of same order.
1519 unsigned int order, in free_one_page() argument
1527 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1611 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1618 if (!free_pages_prepare(page, order, true, fpi_flags)) in __free_pages_ok()
1623 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1624 free_one_page(page_zone(page), page, pfn, order, migratetype, in __free_pages_ok()
1629 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1631 unsigned int nr_pages = 1 << order; in __free_pages_core()
1655 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); in __free_pages_core()
1703 unsigned int order) in memblock_free_pages() argument
1707 __free_pages_core(page, order); in memblock_free_pages()
1929 * In order to try and keep some memory in the cache we have the loop
1930 * broken along max page order boundaries. This way we will not cause
2083 * deferred pages to satisfy the allocation specified by order, rounded up to
2097 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2099 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); in deferred_grow_zone()
2164 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2166 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2248 * The order of subdivision here is critical for the IO subsystem.
2249 * Please do not alter this order without good reasons and regression
2251 * the order in which smaller blocks are delivered depends on the order
2253 * influencing the order in which pages are delivered to the IO
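The subdivision the comment above refers to is the halving done by expand(). The sketch below is an illustrative userspace model of that step, with invented names: a block of `high` order is repeatedly cut in half, the upper half goes back as a free block, and the lower half keeps shrinking until it reaches the requested order.

#include <stdio.h>

static void model_expand(unsigned long pfn, unsigned int low, unsigned int high)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* the upper half is returned as a free block of order `high` */
		printf("free block: pfn %lu, order %u\n", pfn + size, high);
	}
	printf("allocated:  pfn %lu, order %u\n", pfn, low);
}

int main(void)
{
	model_expand(0, 1, 4);	/* carve an order-1 request out of an order-4 block */
	return 0;
}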
2312 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2330 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2347 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
2350 for (i = 0; i < (1 << order); i++) { in check_new_pages()
2360 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
2366 arch_alloc_page(page, order); in post_alloc_hook()
2367 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
2374 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
2382 kasan_alloc_pages(page, order, gfp_flags); in post_alloc_hook()
2386 kasan_unpoison_pages(page, order, init); in post_alloc_hook()
2388 kernel_init_free_pages(page, 1 << order, in post_alloc_hook()
2392 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
2395 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
2398 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
2400 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
2401 prep_compound_page(page, order); in prep_new_page()
2421 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2429 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
2435 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2445 * This array describes the order lists are fallen back to when
2462 unsigned int order) in __rmqueue_cma_fallback() argument
2464 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2468 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
2481 unsigned int order; in move_freepages() local
2508 order = buddy_order(page); in move_freepages()
2509 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2510 page += 1 << order; in move_freepages()
2511 pages_moved += 1 << order; in move_freepages()
2565 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
2568 * Leaving this order check is intended, although there is in can_steal_fallback()
2569 * relaxed order check in next check. The reason is that in can_steal_fallback()
2574 if (order >= pageblock_order) in can_steal_fallback()
2577 if (order >= pageblock_order / 2 || in can_steal_fallback()
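As a rough model of the steal decision excerpted above (simplified from the kernel logic; the migratetype and mobility-grouping conditions not visible in the listing are collapsed into a single `aggressive` flag, which is purely an assumption of this sketch): whole-pageblock requests always justify stealing, and smaller requests do so from roughly half a pageblock upwards or when the caller is already being aggressive.

#include <stdbool.h>

#define MODEL_PAGEBLOCK_ORDER 9	/* 2 MiB pageblocks with 4 KiB pages */

static bool model_can_steal_fallback(unsigned int order, bool aggressive)
{
	if (order >= MODEL_PAGEBLOCK_ORDER)
		return true;		/* taking the whole pageblock anyway */
	if (order >= MODEL_PAGEBLOCK_ORDER / 2 || aggressive)
		return true;
	return false;
}

int main(void)
{
	/* a whole-pageblock request always justifies stealing the block */
	return model_can_steal_fallback(MODEL_PAGEBLOCK_ORDER, false) ? 0 : 1;
}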
2624 * This function implements actual steal behaviour. If order is large enough,
2708 * Check whether there is a suitable fallback freepage with requested order.
2713 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2731 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
2745 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2746 * there are no empty page blocks that contain a page with a suitable order
2783 * potentially hurts the reliability of high-order allocations when under
2798 int order; in unreserve_highatomic_pageblock() local
2812 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
2813 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2868 * The use of signed ints for order and current_order is a deliberate
2873 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2878 int min_order = order; in __rmqueue_fallback()
2913 && current_order > order) in __rmqueue_fallback()
2922 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback()
2943 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2955 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2961 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2963 if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2967 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
2972 static struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma() argument
2976 struct page *page = __rmqueue_cma_fallback(zone, order); in __rmqueue_cma()
2977 trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA); in __rmqueue_cma()
2981 static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order, in __rmqueue_cma() argument
2994 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
3005 page = __rmqueue_cma(zone, order, migratetype, in rmqueue_bulk()
3008 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_bulk()
3018 * physical page order. The page is added to the tail of in rmqueue_bulk()
3022 * head, thus also in the physical page order. This is useful in rmqueue_bulk()
3030 -(1 << order)); in rmqueue_bulk()
3039 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3050 unsigned int order, struct per_cpu_pages *pcp, in get_populated_pcp_list() argument
3056 pcp->count += rmqueue_bulk(zone, order, in get_populated_pcp_list()
3251 unsigned int order, t; in mark_free_pages() local
3276 for_each_migratetype_order(order, t) { in mark_free_pages()
3278 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3282 for (i = 0; i < (1UL << order); i++) { in mark_free_pages()
3346 * Free a 0-order page
3362 * Free a list of 0-order pages
3400 * split_page takes a non-compound higher-order page, and splits it into
3401 * n (1<<order) sub-pages: page[0..n-1]
3407 void split_page(struct page *page, unsigned int order) in split_page() argument
3414 for (i = 1; i < (1 << order); i++) in split_page()
3416 split_page_owner(page, 1 << order); in split_page()
3417 split_page_memcg(page, 1 << order); in split_page()
3421 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3435 * emulate a high-order watermark check with a raised order-0 in __isolate_free_page()
3436 * watermark, because we already know our high-order page in __isolate_free_page()
3439 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3443 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3448 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3454 if (order >= pageblock_order - 1) { in __isolate_free_page()
3455 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3466 return 1UL << order; in __isolate_free_page()
3472 * @order: Order of the isolated page
3478 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3486 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3576 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3580 struct zone *zone, unsigned int order, in rmqueue() argument
3587 if (likely(order == 0)) { in rmqueue()
3595 * allocate greater than order-1 page units with __GFP_NOFAIL. in rmqueue()
3597 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
3603 * order-0 request can reach here when the pcplist is skipped in rmqueue()
3605 * reserved for high-order atomic allocation, so order-0 in rmqueue()
3608 if (order > 0 && alloc_flags & ALLOC_HARDER) { in rmqueue()
3609 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3611 trace_mm_page_alloc_zone_locked(page, order, migratetype); in rmqueue()
3616 page = __rmqueue_cma(zone, order, migratetype, in rmqueue()
3619 page = __rmqueue(zone, order, migratetype, in rmqueue()
3622 } while (page && check_new_pages(page, order)); in rmqueue()
3626 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3629 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue()
3631 trace_android_vh_rmqueue(preferred_zone, zone, order, in rmqueue()
3671 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3673 if (order < fail_page_alloc.min_order) in __should_fail_alloc_page()
3683 return should_fail(&fail_page_alloc.attr, 1 << order); in __should_fail_alloc_page()
3700 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); in fail_page_alloc_debugfs()
3711 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3718 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3720 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3725 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
3728 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3748 * Return true if free base pages are above 'mark'. For high-order checks it
3749 * will return true if the order-0 watermark is reached and there is at least in __zone_watermark_ok()
3753 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3762 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
3781 * Check watermarks for an order-0 allocation request. If these in __zone_watermark_ok()
3782 * are not met, then a high-order request also cannot go ahead in __zone_watermark_ok()
3788 /* If this is an order-0 request then the watermark is fine */ in __zone_watermark_ok()
3789 if (!order) in __zone_watermark_ok()
3792 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
3793 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
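The two-stage check described above is easy to restate standalone: an order-0 watermark test first, then, for order > 0, a scan for at least one free block of the requested order or larger. The sketch below assumes the per-order free counts are a plain array, which is a simplification, not the kernel's free_area structure.

#include <stdbool.h>

#define MODEL_MAX_ORDER 11

static bool model_watermark_ok(unsigned long free_pages, unsigned long mark,
			       unsigned int order,
			       const unsigned long nr_free[MODEL_MAX_ORDER])
{
	if (free_pages <= mark)		/* order-0 watermark not met */
		return false;
	if (!order)
		return true;		/* order-0 request: watermark is enough */
	for (unsigned int o = order; o < MODEL_MAX_ORDER; o++)
		if (nr_free[o])
			return true;	/* a suitable high-order block exists */
	return false;
}

int main(void)
{
	unsigned long nr_free[MODEL_MAX_ORDER] = { 0 };

	nr_free[4] = 1;		/* one free order-4 block */
	return model_watermark_ok(128, 64, 3, nr_free) ? 0 : 1;
}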
3825 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3828 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
3833 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3842 * Fast check for order-0 only. If this fails then the reserves in zone_watermark_fast()
3845 if (!order) { in zone_watermark_fast()
3858 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
3862 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations in zone_watermark_fast()
3867 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
3870 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
3877 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
3885 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, in zone_watermark_ok_safe()
3963 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
4033 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
4044 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4057 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4067 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
4076 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4079 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
4082 * If this is a high-order atomic allocation then check in get_page_from_freelist()
4085 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
4086 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4093 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4157 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4163 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4170 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4177 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4185 .order = order, in __alloc_pages_may_oom()
4209 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
4217 /* The OOM killer will not help higher order allocs */ in __alloc_pages_may_oom()
4218 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
4254 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4269 /* Try memory compaction for high-order allocations before reclaim */
4271 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4279 if (!order) in __alloc_pages_direct_compact()
4285 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4299 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4303 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4309 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4326 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
4337 if (!order) in should_compact_retry()
4352 * compaction was skipped because there are not enough order-0 pages in should_compact_retry()
4356 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
4378 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4390 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
4399 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
4404 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4413 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
4421 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4427 * Let's give them a good hope and keep retrying while the order-0 in should_compact_retry()
4517 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4530 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4543 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4553 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4558 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4567 trace_android_vh_drain_all_pages_bypass(gfp_mask, order, in __alloc_pages_direct_reclaim()
4580 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4591 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4691 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4701 * their order will become available due to high fragmentation so in should_reclaim_retry()
4704 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
4738 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4740 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4815 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4819 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
4832 trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record); in __alloc_pages_slowpath()
4867 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4873 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4880 * movable high-order allocations, do that as well, as compaction will in __alloc_pages_slowpath()
4888 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4890 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4906 * order, fail immediately unless the allocator has in __alloc_pages_slowpath()
4912 * bursty high order allocations, in __alloc_pages_slowpath()
4935 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4953 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4965 trace_android_vh_alloc_pages_reclaim_bypass(gfp_mask, order, in __alloc_pages_slowpath()
4972 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4978 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4988 * Do not retry costly high order allocations unless they are in __alloc_pages_slowpath()
4994 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4999 * It doesn't make any sense to retry compaction if the order-0 in __alloc_pages_slowpath()
5005 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
5020 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
5070 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); in __alloc_pages_slowpath()
5078 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
5086 trace_android_vh_alloc_pages_failure_bypass(gfp_mask, order, in __alloc_pages_slowpath()
5092 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
5094 trace_android_vh_alloc_pages_slowpath_end(gfp_mask, order, vh_record); in __alloc_pages_slowpath()
5098 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
5125 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5148 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, in __alloc_pages_nodemask() argument
5157 * There are several places where we assume that the order value is sane in __alloc_pages_nodemask()
5160 if (unlikely(order >= MAX_ORDER)) { in __alloc_pages_nodemask()
5167 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
5177 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
5196 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
5200 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
5201 __free_pages(page, order); in __alloc_pages_nodemask()
5205 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
5216 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
5220 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5233 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
5235 if (order == 0) /* Via pcp? */ in free_the_page()
5238 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
5241 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5243 trace_android_vh_free_pages(page, order); in __free_pages()
5245 free_the_page(page, order); in __free_pages()
5247 while (order-- > 0) in __free_pages()
5248 free_the_page(page + (1 << order), order); in __free_pages()
5252 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
5256 __free_pages(virt_to_page((void *)addr), order); in free_pages()
5265 * within a 0 or higher order page. Multiple fragments within that page
5375 * Frees a page fragment allocated out of either a compound or order 0 page.
5386 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
5390 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
5393 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
5419 unsigned int order = get_order(size); in alloc_pages_exact() local
5425 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
5426 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
5444 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
5450 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
5453 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
5834 unsigned int order; in show_free_areas() local
5844 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5845 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5848 nr[order] = area->nr_free; in show_free_areas()
5849 total += nr[order] << order; in show_free_areas()
5851 types[order] = 0; in show_free_areas()
5854 types[order] |= 1 << type; in show_free_areas()
5858 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5860 nr[order], K(1UL) << order); in show_free_areas()
5861 if (nr[order]) in show_free_areas()
5862 show_migration_types(types[order]); in show_free_areas()
6215 * needs the percpu allocator in order to allocate its pagesets in build_all_zonelists_init()
6430 unsigned int order, t; in zone_init_free_lists() local
6431 for_each_migratetype_order(order, t) { in zone_init_free_lists()
6432 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6433 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6593 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
6619 /* Update high, then batch, in order */ in pageset_update()
6818 * zones within a node are in order of monotonically increasing memory addresses
7056 unsigned int order; in set_pageblock_order() local
7063 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
7065 order = MAX_ORDER - 1; in set_pageblock_order()
7068 * Assume the largest contiguous order of interest is a huge page. in set_pageblock_order()
7072 pageblock_order = order; in set_pageblock_order()
7269 * aligned but the node_mem_map endpoints must be in order in alloc_node_mem_map()
7674 * such cases we allow max_zone_pfn to be sorted in descending order
8470 /* Make sure we've got at least a 0-order allocation.. */ in alloc_large_system_hash()
8524 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", in alloc_large_system_hash()
8635 * in MEM_GOING_OFFLINE in order to indicate that these pages in has_unmovable_pages()
8802 unsigned int order; in alloc_contig_range() local
8808 .order = -1, in alloc_contig_range()
8820 * MIGRATE_ISOLATE. Because pageblock and max order pages may in alloc_contig_range()
8879 * page allocator holds, ie. they can be part of higher order in alloc_contig_range()
8887 order = 0; in alloc_contig_range()
8890 if (++order >= MAX_ORDER) { in alloc_contig_range()
8894 outer_start &= ~0UL << order; in alloc_contig_range()
8898 order = buddy_order(pfn_to_page(outer_start)); in alloc_contig_range()
8901 * outer_start page could be a small-order buddy page and in alloc_contig_range()
8906 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
9096 unsigned int order; in __offline_isolated_pages() local
9125 order = buddy_order(page); in __offline_isolated_pages()
9126 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9127 pfn += (1 << order); in __offline_isolated_pages()
9138 unsigned int order; in is_free_buddy_page() local
9141 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
9142 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
9144 if (PageBuddy(page_head) && buddy_order(page_head) >= order) in is_free_buddy_page()
9149 return order < MAX_ORDER; in is_free_buddy_page()
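The head-page computation in the lines above is just alignment arithmetic; here it is as a standalone helper (the name block_start_pfn is invented for this sketch): masking off the low `order` bits of a pfn gives the first page of the order-sized block containing it.

#include <stdio.h>

static unsigned long block_start_pfn(unsigned long pfn, unsigned int order)
{
	return pfn & ~((1UL << order) - 1);
}

int main(void)
{
	/* pfn 0x12f lies inside the order-4 (16-page) block starting at 0x120 */
	printf("%#lx\n", block_start_pfn(0x12f, 4));
	return 0;
}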
9154 * Break down a higher-order page in sub-pages, and keep our target out of
9195 unsigned int order; in take_page_off_buddy() local
9199 for (order = 0; order < MAX_ORDER; order++) { in take_page_off_buddy()
9200 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
9203 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()