Lines Matching +full:vm +full:- +full:map
1 // SPDX-License-Identifier: GPL-2.0-only
5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
44 #include "pgalloc-track.h"
67 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
158 * unmap_kernel_range_noflush - unmap kernel VM area
159 * @start: start of the VM area to unmap
160 * @size: size of the VM area to unmap
162 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify
167  * for calling flush_cache_vunmap() on to-be-unmapped areas before calling this
206 return -ENOMEM; in vmap_pte_range()
211 return -EBUSY; in vmap_pte_range()
213 return -ENOMEM; in vmap_pte_range()
230 return -ENOMEM; in vmap_pmd_range()
234 return -ENOMEM; in vmap_pmd_range()
248 return -ENOMEM; in vmap_pud_range()
252 return -ENOMEM; in vmap_pud_range()
266 return -ENOMEM; in vmap_p4d_range()
270 return -ENOMEM; in vmap_p4d_range()
276 * map_kernel_range_noflush - map kernel VM area with the specified pages
277 * @addr: start of the VM area to map
278 * @size: size of the VM area to map
280 * @pages: pages to map
282 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should
287 * calling flush_cache_vmap() on to-be-mapped areas before calling this
291 * 0 on success, -errno on failure.
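The two *_noflush helpers documented above are internal to mm/; their contract (the caller flushes the cache before mapping or unmapping and the TLB after unmapping) is easiest to see in the non-noflush wrappers kept in this same file. A condensed sketch of that pairing, not a verbatim copy, with illustrative function and argument names:

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Sketch of how map_kernel_range()/unmap_kernel_range() wrap the
 * _noflush variants with the flushes the comments above require. */
static int map_range_sketch(unsigned long start, unsigned long size,
			    pgprot_t prot, struct page **pages)
{
	int ret = map_kernel_range_noflush(start, size, prot, pages);

	flush_cache_vmap(start, start + size);		/* after mapping */
	return ret;
}

static void unmap_range_sketch(unsigned long addr, unsigned long size)
{
	flush_cache_vunmap(addr, addr + size);		/* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after unmapping */
}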
335 * ARM, x86-64 and sparc64 put modules in a special place, in is_vmalloc_or_module_addr()
399 * Map a vmalloc()-space virtual address to the physical page frame number.
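A small sketch of the lookup that comment describes: walk a vmalloc'ed buffer page by page and resolve each virtual page to its backing page frame. dump_backing_pfns() is a hypothetical helper, not part of this file:

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: report the physical frame behind each page of a
 * vmalloc'ed buffer. */
static void dump_backing_pfns(void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);

		pr_info("va %px -> pfn %lu (page %px)\n",
			buf + off, vmalloc_to_pfn(buf + off), page);
	}
}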
437  * This augmented red-black tree represents the free vmap space.
438 * All vmap_area objects in this tree are sorted by va->va_start
443 * of its sub-tree, right or left. Therefore it is possible to
458 return (va->va_end - va->va_start); in va_size()
467 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
477 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
478 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
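A kernel-independent sketch of the invariant those helpers maintain: every node caches the largest free block anywhere in its subtree, so a search can skip an entire subtree whose cached maximum is too small. struct toy_node and the helpers below are purely illustrative:

/* Toy node mirroring the augment: subtree_max_size is the largest free
 * block in this node's subtree, including the node itself. */
struct toy_node {
	unsigned long size;              /* free block size of this node */
	unsigned long subtree_max_size;  /* max over node + both subtrees */
	struct toy_node *left, *right;
};

static unsigned long toy_subtree_max(struct toy_node *n)
{
	return n ? n->subtree_max_size : 0;
}

/* Recompute the cached maximum after this node or a child changed. */
static void toy_update(struct toy_node *n)
{
	unsigned long m = n->size;

	if (toy_subtree_max(n->left) > m)
		m = toy_subtree_max(n->left);
	if (toy_subtree_max(n->right) > m)
		m = toy_subtree_max(n->right);
	n->subtree_max_size = m;
}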
504 if (addr < va->va_start) in __find_vmap_area()
505 n = n->rb_left; in __find_vmap_area()
506 else if (addr >= va->va_end) in __find_vmap_area()
507 n = n->rb_right; in __find_vmap_area()
532 link = &root->rb_node; in find_va_links()
544 * it link, where the new va->rb_node will be attached to. in find_va_links()
554 if (va->va_start < tmp_va->va_end && in find_va_links()
555 va->va_end <= tmp_va->va_start) in find_va_links()
556 link = &(*link)->rb_left; in find_va_links()
557 else if (va->va_end > tmp_va->va_start && in find_va_links()
558 va->va_start >= tmp_va->va_end) in find_va_links()
559 link = &(*link)->rb_right; in find_va_links()
561 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", in find_va_links()
562 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
568 *parent = &tmp_va->rb_node; in find_va_links()
579 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
586 list = &rb_entry(parent, struct vmap_area, rb_node)->list; in get_va_next_sibling()
587 return (&parent->rb_right == link ? list->next : list); in get_va_next_sibling()
599 head = &rb_entry(parent, struct vmap_area, rb_node)->list; in link_va()
600 if (&parent->rb_right != link) in link_va()
601 head = head->prev; in link_va()
604 /* Insert to the rb-tree */ in link_va()
605 rb_link_node(&va->rb_node, parent, link); in link_va()
609 * to the tree. We do not set va->subtree_max_size to in link_va()
618 rb_insert_augmented(&va->rb_node, in link_va()
620 va->subtree_max_size = 0; in link_va()
622 rb_insert_color(&va->rb_node, root); in link_va()
625 /* Address-sort this list */ in link_va()
626 list_add(&va->list, head); in link_va()
632 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in unlink_va()
636 rb_erase_augmented(&va->rb_node, in unlink_va()
639 rb_erase(&va->rb_node, root); in unlink_va()
641 list_del(&va->list); in unlink_va()
642 RB_CLEAR_NODE(&va->rb_node); in unlink_va()
654 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
656 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
668 * - After VA has been inserted to the tree(free path);
669 * - After VA has been shrunk(allocation path);
670 * - After VA has been increased(merging path).
676 * 4--8
680 * 2--2 8--8
686 * node becomes 4--6.
696 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
735 * Merge de-allocated chunk of VA memory with previous
773 * |<------VA------>|<-----Next----->| in merge_or_add_vmap_area()
779 if (sibling->va_start == va->va_end) { in merge_or_add_vmap_area()
780 sibling->va_start = va->va_start; in merge_or_add_vmap_area()
794 * |<-----Prev----->|<------VA------>| in merge_or_add_vmap_area()
798 if (next->prev != head) { in merge_or_add_vmap_area()
799 sibling = list_entry(next->prev, struct vmap_area, list); in merge_or_add_vmap_area()
800 if (sibling->va_end == va->va_start) { in merge_or_add_vmap_area()
811 sibling->va_end = va->va_end; in merge_or_add_vmap_area()
839 if (va->va_start > vstart) in is_within_this_va()
840 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
849 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
869 length = size + align - 1; in find_vmap_lowest_match()
874 if (get_subtree_max_size(node->rb_left) >= length && in find_vmap_lowest_match()
875 vstart < va->va_start) { in find_vmap_lowest_match()
876 node = node->rb_left; in find_vmap_lowest_match()
883 * sub-tree if it does not have a free block that is in find_vmap_lowest_match()
886 if (get_subtree_max_size(node->rb_right) >= length) { in find_vmap_lowest_match()
887 node = node->rb_right; in find_vmap_lowest_match()
892 * OK. We roll back and find the first right sub-tree, in find_vmap_lowest_match()
901 if (get_subtree_max_size(node->rb_right) >= length && in find_vmap_lowest_match()
902 vstart <= va->va_start) { in find_vmap_lowest_match()
903 node = node->rb_right; in find_vmap_lowest_match()
966 if (nva_start_addr < va->va_start || in classify_va_fit_type()
967 nva_start_addr + size > va->va_end) in classify_va_fit_type()
971 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
972 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
976 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
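A standalone sketch of the classification done above: given a free area [va_start, va_end) and a request [nva, nva + size), report which edges of the free area the request touches. The enum and helper names here are illustrative, not the kernel's:

enum toy_fit {
	TOY_NO_FIT,    /* request not fully inside the free area */
	TOY_FULL_FIT,  /* request covers the whole free area */
	TOY_LEFT_FIT,  /* request starts at va_start, a tail stays free */
	TOY_RIGHT_FIT, /* request ends at va_end, a head stays free */
	TOY_SPLIT_FIT, /* request in the middle, free area splits in two */
};

static enum toy_fit toy_classify(unsigned long va_start, unsigned long va_end,
				 unsigned long nva, unsigned long size)
{
	if (nva < va_start || nva + size > va_end)
		return TOY_NO_FIT;

	if (nva == va_start)
		return (nva + size == va_end) ? TOY_FULL_FIT : TOY_LEFT_FIT;
	if (nva + size == va_end)
		return TOY_RIGHT_FIT;
	return TOY_SPLIT_FIT;
}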
998 * |---------------| in adjust_va_to_fit_type()
1008 * |-------|-------| in adjust_va_to_fit_type()
1010 va->va_start += size; in adjust_va_to_fit_type()
1017 * |-------|-------| in adjust_va_to_fit_type()
1019 va->va_end = nva_start_addr; in adjust_va_to_fit_type()
1026 * |---|-------|---| in adjust_va_to_fit_type()
1031 * For percpu allocator we do not do any pre-allocation in adjust_va_to_fit_type()
1057 return -1; in adjust_va_to_fit_type()
1063 lva->va_start = va->va_start; in adjust_va_to_fit_type()
1064 lva->va_end = nva_start_addr; in adjust_va_to_fit_type()
1069 va->va_start = nva_start_addr + size; in adjust_va_to_fit_type()
1071 return -1; in adjust_va_to_fit_type()
1078 insert_vmap_area_augment(lva, &va->rb_node, in adjust_va_to_fit_type()
1102 if (va->va_start > vstart) in __alloc_vmap_area()
1103 nva_start_addr = ALIGN(va->va_start, align); in __alloc_vmap_area()
1167 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1174 return ERR_PTR(-ENOMEM); in alloc_vmap_area()
1180 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1191 * The preload is done in non-atomic context, thus it allows us in alloc_vmap_area()
1223 va->va_start = addr; in alloc_vmap_area()
1224 va->va_end = addr + size; in alloc_vmap_area()
1225 va->vm = NULL; in alloc_vmap_area()
1232 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
1233 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
1234 BUG_ON(va->va_end > vend); in alloc_vmap_area()
1265 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1318 /* for per-CPU blocks */
1331 * Purges all lazily-freed vmap areas.
1351 if (va->va_start < start) in __purge_vmap_area_lazy()
1352 start = va->va_start; in __purge_vmap_area_lazy()
1353 if (va->va_end > end) in __purge_vmap_area_lazy()
1354 end = va->va_end; in __purge_vmap_area_lazy()
1362 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
1363 unsigned long orig_start = va->va_start; in __purge_vmap_area_lazy()
1364 unsigned long orig_end = va->va_end; in __purge_vmap_area_lazy()
1367 * Finally insert or merge lazily-freed area. It is in __purge_vmap_area_lazy()
1379 va->va_start, va->va_end); in __purge_vmap_area_lazy()
1426 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> in free_vmap_area_noflush()
1430 llist_add(&va->purge_list, &vmap_purge_list); in free_vmap_area_noflush()
1441 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
1442 unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); in free_unmap_vmap_area()
1444 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
1468 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1524 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
1539 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1544 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1560 return ERR_PTR(-ENOMEM); in new_vmap_block()
1570 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
1571 spin_lock_init(&vb->lock); in new_vmap_block()
1572 vb->va = va; in new_vmap_block()
1575 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
1576 vb->dirty = 0; in new_vmap_block()
1577 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
1578 vb->dirty_max = 0; in new_vmap_block()
1579 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
1581 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
1590 spin_lock(&vbq->lock); in new_vmap_block()
1591 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
1592 spin_unlock(&vbq->lock); in new_vmap_block()
1602 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
1605 free_vmap_area_noflush(vb->va); in free_vmap_block()
1617 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
1619 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) in purge_fragmented_blocks()
1622 spin_lock(&vb->lock); in purge_fragmented_blocks()
1623 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { in purge_fragmented_blocks()
1624 vb->free = 0; /* prevent further allocs after releasing lock */ in purge_fragmented_blocks()
1625 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ in purge_fragmented_blocks()
1626 vb->dirty_min = 0; in purge_fragmented_blocks()
1627 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_blocks()
1628 spin_lock(&vbq->lock); in purge_fragmented_blocks()
1629 list_del_rcu(&vb->free_list); in purge_fragmented_blocks()
1630 spin_unlock(&vbq->lock); in purge_fragmented_blocks()
1631 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1632 list_add_tail(&vb->purge, &purge); in purge_fragmented_blocks()
1634 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1639 list_del(&vb->purge); in purge_fragmented_blocks()
1673 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
1676 spin_lock(&vb->lock); in vb_alloc()
1677 if (vb->free < (1UL << order)) { in vb_alloc()
1678 spin_unlock(&vb->lock); in vb_alloc()
1682 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
1683 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
1684 vb->free -= 1UL << order; in vb_alloc()
1685 if (vb->free == 0) { in vb_alloc()
1686 spin_lock(&vbq->lock); in vb_alloc()
1687 list_del_rcu(&vb->free_list); in vb_alloc()
1688 spin_unlock(&vbq->lock); in vb_alloc()
1691 spin_unlock(&vb->lock); in vb_alloc()
1717 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; in vb_free()
1725 spin_lock(&vb->lock); in vb_free()
1728 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
1729 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
1731 vb->dirty += 1UL << order; in vb_free()
1732 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
1733 BUG_ON(vb->free); in vb_free()
1734 spin_unlock(&vb->lock); in vb_free()
1737 spin_unlock(&vb->lock); in vb_free()
1754 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in _vm_unmap_aliases()
1755 spin_lock(&vb->lock); in _vm_unmap_aliases()
1756 if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { in _vm_unmap_aliases()
1757 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
1760 s = va_start + (vb->dirty_min << PAGE_SHIFT); in _vm_unmap_aliases()
1761 e = va_start + (vb->dirty_max << PAGE_SHIFT); in _vm_unmap_aliases()
1768 spin_unlock(&vb->lock); in _vm_unmap_aliases()
1781 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1803 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1829 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
1830 (va->va_end - va->va_start)); in vm_unmap_ram()
1836 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1842 * faster than vmap so it's good. But if you mix long-life and short-life
1845 * the end. Please use this function for short-lived objects.
1867 addr = va->va_start; in vm_map_ram()
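The comment above recommends vm_map_ram() only for short-lived mappings. A sketch of that pattern (the helper and page count are made up; in this kernel the prototype takes (pages, count, node)):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define NR_SHORT_PAGES 4

/* Hypothetical demo: map a handful of pages briefly, then tear the
 * mapping down with the same page count. */
static int short_lived_map_demo(void)
{
	struct page *pages[NR_SHORT_PAGES];
	void *vaddr = NULL;
	int i;

	for (i = 0; i < NR_SHORT_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	vaddr = vm_map_ram(pages, NR_SHORT_PAGES, NUMA_NO_NODE);
	if (vaddr) {
		memset(vaddr, 0, NR_SHORT_PAGES * PAGE_SIZE);	/* use the mapping */
		vm_unmap_ram(vaddr, NR_SHORT_PAGES);		/* count must match */
	}
	i = NR_SHORT_PAGES;
out_free:
	while (i--)
		__free_page(pages[i]);
	return vaddr ? 0 : -ENOMEM;
}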
1884 * vm_area_add_early - add vmap area early during boot
1885 * @vm: vm_struct to add
1887 * This function is used to add fixed kernel vm area to vmlist before
1888 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1893 void __init vm_area_add_early(struct vm_struct *vm) in vm_area_add_early() argument
1898 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1899 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1900 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1903 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1905 vm->next = *p; in vm_area_add_early()
1906 *p = vm; in vm_area_add_early()
1910 * vm_area_register_early - register vmap area early during boot
1911 * @vm: vm_struct to register
1914 * This function is used to register kernel vm area before
1915 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1917 * vm->addr contains the allocated address.
1921 void __init vm_area_register_early(struct vm_struct *vm, size_t align) in vm_area_register_early() argument
1927 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1929 vm->addr = (void *)addr; in vm_area_register_early()
1931 vm_area_add_early(vm); in vm_area_register_early()
1942 * -|-----|.....|-----|-----|-----|.....|- in vmap_init_free_space()
1944 * |<--------------------------------->| in vmap_init_free_space()
1947 if (busy->va_start - vmap_start > 0) { in vmap_init_free_space()
1950 free->va_start = vmap_start; in vmap_init_free_space()
1951 free->va_end = busy->va_start; in vmap_init_free_space()
1959 vmap_start = busy->va_end; in vmap_init_free_space()
1962 if (vmap_end - vmap_start > 0) { in vmap_init_free_space()
1965 free->va_start = vmap_start; in vmap_init_free_space()
1966 free->va_end = vmap_end; in vmap_init_free_space()
1991 spin_lock_init(&vbq->lock); in vmalloc_init()
1992 INIT_LIST_HEAD(&vbq->free); in vmalloc_init()
1994 init_llist_head(&p->list); in vmalloc_init()
1995 INIT_WORK(&p->wq, free_work); in vmalloc_init()
1999 for (tmp = vmlist; tmp; tmp = tmp->next) { in vmalloc_init()
2004 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
2005 va->va_end = va->va_start + tmp->size; in vmalloc_init()
2006 va->vm = tmp; in vmalloc_init()
2018 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2019 * @addr: start of the VM area to unmap
2020 * @size: size of the VM area to unmap
2034 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, in setup_vmalloc_vm_locked() argument
2037 vm->flags = flags; in setup_vmalloc_vm_locked()
2038 vm->addr = (void *)va->va_start; in setup_vmalloc_vm_locked()
2039 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm_locked()
2040 vm->caller = caller; in setup_vmalloc_vm_locked()
2041 va->vm = vm; in setup_vmalloc_vm_locked()
2042 trace_android_vh_save_vmalloc_stack(flags, vm); in setup_vmalloc_vm_locked()
2045 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
2049 setup_vmalloc_vm_locked(vm, va, flags, caller); in setup_vmalloc_vm()
2053 static void clear_vm_uninitialized_flag(struct vm_struct *vm) in clear_vm_uninitialized_flag() argument
2057 * we should make sure that vm has proper values. in clear_vm_uninitialized_flag()
2061 vm->flags &= ~VM_UNINITIALIZED; in clear_vm_uninitialized_flag()
2094 kasan_unpoison_vmalloc((void *)va->va_start, requested_size); in __get_vm_area_node()
2111 * get_vm_area - reserve a contiguous kernel virtual area
2136 * find_vm_area - find a continuous kernel virtual area
2139 * Search for the kernel VM area starting at @addr, and return it.
2153 return va->vm; in find_vm_area()
2157 * remove_vm_area - find and remove a continuous kernel virtual area
2160 * Search for the kernel VM area starting at @addr, and remove it.
2161 * This function returns the found VM area, but using it is NOT safe
2174 if (va && va->vm) { in remove_vm_area()
2175 struct vm_struct *vm = va->vm; in remove_vm_area() local
2177 trace_android_vh_remove_vmalloc_stack(vm); in remove_vm_area()
2178 va->vm = NULL; in remove_vm_area()
2181 kasan_free_shadow(vm); in remove_vm_area()
2184 return vm; in remove_vm_area()
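A sketch tying get_vm_area(), find_vm_area() and free_vm_area() together: reserve a chunk of vmalloc address space without backing it, look the reservation up by its start address, then drop it. The size, flag and helper name are illustrative:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void vm_area_demo(void)
{
	struct vm_struct *area, *found;

	area = get_vm_area(16 * PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return;

	/* find_vm_area() maps the start address back to its vm_struct. */
	found = find_vm_area(area->addr);
	WARN_ON(found != area);

	/* Nothing was mapped into the range, so just drop the reservation. */
	free_vm_area(area);
}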
2196 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2197 if (page_address(area->pages[i])) in set_area_direct_map()
2198 set_direct_map(area->pages[i]); in set_area_direct_map()
2201 /* Handle removing and resetting vm mappings related to the vm_struct. */
2205 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2209 remove_vm_area(area->addr); in vm_remove_mappings()
2216 * If not deallocating pages, just do the flush of the VM area and in vm_remove_mappings()
2225 * If execution gets here, flush the vm mapping and reset the direct in vm_remove_mappings()
2226 * map. Find the start and end range of the direct mappings to make sure in vm_remove_mappings()
2227 * the vm_unmap_aliases() flush includes the direct map. in vm_remove_mappings()
2229 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2230 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2239 * Set direct map to something invalid so that it won't be cached if in vm_remove_mappings()
2241 * reset the direct map permissions to the default. in vm_remove_mappings()
2261 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
2266 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2267 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2269 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2276 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2277 struct page *page = area->pages[i]; in __vunmap()
2282 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2284 kvfree(area->pages); in __vunmap()
2301 if (llist_add((struct llist_node *)addr, &p->list)) in __vfree_deferred()
2302 schedule_work(&p->wq); in __vfree_deferred()
2306 * vfree_atomic - release memory allocated by vmalloc()
2332 * vfree - Release memory allocated by vmalloc()
2346  * conventions for vfree() arch-dependent would be a really bad idea).
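A sketch of the context rule spelled out in the two comments above: vfree() may sleep when called from process context, while vfree_atomic() only queues the area for deferred freeing and so is the right call when sleeping is not an option. release_buffer() and its can_sleep hint are hypothetical:

#include <linux/types.h>
#include <linux/vmalloc.h>

static void release_buffer(void *buf, bool can_sleep)
{
	if (can_sleep)
		vfree(buf);		/* may sleep; fine in process context */
	else
		vfree_atomic(buf);	/* defers the free, never sleeps */
}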
2364 * vunmap - release virtual mapping obtained by vmap()
2382 * vmap - map an array of pages into virtually contiguous space
2384 * @count: number of pages to map
2385 * @flags: vm_area->flags
2412 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), in vmap()
2414 vunmap(area->addr); in vmap()
2419 area->pages = pages; in vmap()
2420 area->nr_pages = count; in vmap()
2422 return area->addr; in vmap()
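A short usage sketch for the function above: give an array of (not necessarily physically contiguous) pages one contiguous kernel virtual mapping, then drop it. The helpers and the page count are illustrative, and the pages are assumed to be allocated by the caller:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_eight_pages(struct page **pages)
{
	/* VM_MAP marks the area as a vmap()'d region in /proc/vmallocinfo. */
	return vmap(pages, 8, VM_MAP, PAGE_KERNEL);
}

static void unmap_eight_pages(void *vaddr)
{
	/* vunmap() drops the mapping but does not free the pages. */
	vunmap(vaddr);
}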
2437 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) in vmap_pfn_apply()
2438 return -EINVAL; in vmap_pfn_apply()
2439 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); in vmap_pfn_apply()
2444 * vmap_pfn - map an array of PFNs into virtually contiguous space
2446 * @count: number of pages to map
2461 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2466 return area->addr; in vmap_pfn()
2486 area->caller); in __vmalloc_area_node()
2492 remove_vm_area(area->addr); in __vmalloc_area_node()
2497 area->pages = pages; in __vmalloc_area_node()
2498 area->nr_pages = nr_pages; in __vmalloc_area_node()
2500 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2510 area->nr_pages = i; in __vmalloc_area_node()
2511 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2514 area->pages[i] = page; in __vmalloc_area_node()
2518 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2520 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), in __vmalloc_area_node()
2524 return area->addr; in __vmalloc_area_node()
2529 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
2530 __vfree(area->addr); in __vmalloc_area_node()
2535 * __vmalloc_node_range - allocate virtually contiguous memory
2538 * @start: vm area range start
2539 * @end: vm area range end
2542 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2547 * allocator with @gfp_mask flags. Map them into contiguous
2592 * __vmalloc_node - allocate virtually contiguous memory
2600 * @gfp_mask flags. Map them into contiguous kernel virtual space.
2602 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2633 * vmalloc - allocate virtually contiguous memory
2637 * allocator and map them into contiguous kernel virtual space.
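A minimal usage sketch for the allocator documented above; alloc_big_table() is hypothetical, and vzalloc() would do the same with zero-fill:

#include <linux/overflow.h>
#include <linux/vmalloc.h>

static int *alloc_big_table(size_t entries)
{
	/* vmalloc() tolerates physically fragmented memory but can still fail. */
	int *table = vmalloc(array_size(entries, sizeof(*table)));

	return table;	/* NULL on failure */
}

static void free_big_table(int *table)
{
	vfree(table);	/* NULL-safe */
}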
2652 * vzalloc - allocate virtually contiguous memory with zero fill
2656 * allocator and map them into contiguous kernel virtual space.
2672 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2690 * vmalloc_node - allocate memory on a specific node
2695 * allocator and map them into contiguous kernel virtual space.
2710 * vzalloc_node - allocate memory on a specific node with zero fill
2715 * allocator and map them into contiguous kernel virtual space.
2740 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2744 * page level allocator and map them into contiguous kernel virtual space.
2756 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2787 length = PAGE_SIZE - offset; in aligned_vread()
2803 void *map = kmap_atomic(p); in aligned_vread() local
2804 memcpy(buf, map + offset, length); in aligned_vread()
2805 kunmap_atomic(map); in aligned_vread()
2812 count -= length; in aligned_vread()
2826 length = PAGE_SIZE - offset; in aligned_vwrite()
2842 void *map = kmap_atomic(p); in aligned_vwrite() local
2843 memcpy(map + offset, buf, length); in aligned_vwrite()
2844 kunmap_atomic(map); in aligned_vwrite()
2849 count -= length; in aligned_vwrite()
2855 * vread() - read vmalloc area in a safe way.
2857 * @addr: vm address.
2863 * proper area of @buf. If there are memory holes, they'll be zero-filled.
2881 struct vm_struct *vm; in vread() local
2888 count = -(unsigned long) addr; in vread()
2895 if (!va->vm) in vread()
2898 vm = va->vm; in vread()
2899 vaddr = (char *) vm->addr; in vread()
2900 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2908 count--; in vread()
2910 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2913 if (!(vm->flags & VM_IOREMAP)) in vread()
2919 count -= n; in vread()
2926 /* zero-fill memory holes */ in vread()
2928 memset(buf, 0, buflen - (buf - buf_start)); in vread()
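A sketch of wrapping the reader above, as a built-in user such as /proc/kcore would; peek_vmalloc_range() is a hypothetical helper:

#include <linux/vmalloc.h>

/* Copy from a possibly sparse vmalloc range into a kernel buffer; holes
 * read back as zeroes. A return of 0 means the range intersects no live
 * vmalloc area at all. */
static long peek_vmalloc_range(char *dst, unsigned long src, unsigned long len)
{
	return vread(dst, (char *)src, len);
}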
2934 * vwrite() - write vmalloc area in a safe way.
2936 * @addr: vm address.
2960 struct vm_struct *vm; in vwrite() local
2967 count = -(unsigned long) addr; in vwrite()
2975 if (!va->vm) in vwrite()
2978 vm = va->vm; in vwrite()
2979 vaddr = (char *) vm->addr; in vwrite()
2980 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2987 count--; in vwrite()
2989 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2992 if (!(vm->flags & VM_IOREMAP)) { in vwrite()
2998 count -= n; in vwrite()
3008 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3013 * @size: size of map area
3015 * Returns: 0 for success, -Exxx on failure
3033 return -EINVAL; in remap_vmalloc_range_partial()
3038 return -EINVAL; in remap_vmalloc_range_partial()
3042 return -EINVAL; in remap_vmalloc_range_partial()
3044 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3045 return -EINVAL; in remap_vmalloc_range_partial()
3049 return -EINVAL; in remap_vmalloc_range_partial()
3062 size -= PAGE_SIZE; in remap_vmalloc_range_partial()
3065 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in remap_vmalloc_range_partial()
3072 * remap_vmalloc_range - map vmalloc pages to userspace
3073 * @vma: vma to cover (map full range of vma)
3075 * @pgoff: number of pages into addr before first page to map
3077 * Returns: 0 for success, -Exxx on failure
3088 return remap_vmalloc_range_partial(vma, vma->vm_start, in remap_vmalloc_range()
3090 vma->vm_end - vma->vm_start); in remap_vmalloc_range()
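A sketch of exporting a vmalloc_user() buffer to userspace through a driver's mmap file operation, which is the typical caller of the function above. my_buf and my_mmap are hypothetical; the buffer must come from vmalloc_user() (or otherwise have VM_USERMAP set) for remap_vmalloc_range() to accept it:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;	/* allocated elsewhere with vmalloc_user() */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the whole VMA; fails if the VMA extends past the buffer. */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}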
3097 ret = remove_vm_area(area->addr); in free_vm_area()
3110 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3115 * i.e. va->va_start < addr && va->va_end < addr or NULL
3129 if (tmp->va_start <= addr) { in pvm_find_va_enclose_addr()
3131 if (tmp->va_end >= addr) in pvm_find_va_enclose_addr()
3134 n = n->rb_right; in pvm_find_va_enclose_addr()
3136 n = n->rb_left; in pvm_find_va_enclose_addr()
3144 * pvm_determine_end_from_reverse - find the highest aligned address
3147 * in - the VA we start the search(reverse order);
3148 * out - the VA with the highest aligned end address.
3155 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pvm_determine_end_from_reverse()
3161 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
3162 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
3171 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3180 * Percpu allocator wants to use congruent vm areas so that it can
3188 * does everything top-down and scans free blocks from the end looking
3199 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pcpu_get_vm_areas()
3230 if (vmalloc_end - vmalloc_start < last_end) { in pcpu_get_vm_areas()
3249 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
3255 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3275 if (base + end > va->va_end) { in pcpu_get_vm_areas()
3276 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3284 if (base + start < va->va_start) { in pcpu_get_vm_areas()
3285 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
3286 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3295 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3327 va->va_start = start; in pcpu_get_vm_areas()
3328 va->va_end = start + size; in pcpu_get_vm_areas()
3335 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3338 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3342 /* insert all vm's */ in pcpu_get_vm_areas()
3362 while (area--) { in pcpu_get_vm_areas()
3363 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3364 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3369 va->va_start, va->va_end); in pcpu_get_vm_areas()
3413 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3414 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3419 va->va_start, va->va_end); in pcpu_get_vm_areas()
3430 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3473 unsigned int nr, *counters = m->private; in show_numa_info()
3478 if (v->flags & VM_UNINITIALIZED) in show_numa_info()
3485 for (nr = 0; nr < v->nr_pages; nr++) in show_numa_info()
3486 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
3504 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", in show_purge_info()
3505 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
3506 va->va_end - va->va_start); in show_purge_info()
3518 * s_show can encounter race with remove_vm_area, !vm on behalf in s_show()
3521 if (!va->vm) { in s_show()
3522 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", in s_show()
3523 (void *)va->va_start, (void *)va->va_end, in s_show()
3524 va->va_end - va->va_start); in s_show()
3529 v = va->vm; in s_show()
3531 seq_printf(m, "0x%pK-0x%pK %7ld", in s_show()
3532 v->addr, v->addr + v->size, v->size); in s_show()
3534 if (v->caller) in s_show()
3535 seq_printf(m, " %pS", v->caller); in s_show()
3537 if (v->nr_pages) in s_show()
3538 seq_printf(m, " pages=%d", v->nr_pages); in s_show()
3540 if (v->phys_addr) in s_show()
3541 seq_printf(m, " phys=%pa", &v->phys_addr); in s_show()
3543 if (v->flags & VM_IOREMAP) in s_show()
3546 if (v->flags & VM_ALLOC) in s_show()
3549 if (v->flags & VM_MAP) in s_show()
3552 if (v->flags & VM_USERMAP) in s_show()
3555 if (v->flags & VM_DMA_COHERENT) in s_show()
3556 seq_puts(m, " dma-coherent"); in s_show()
3558 if (is_vmalloc_addr(v->pages)) in s_show()
3571 if (list_is_last(&va->list, &vmap_area_list)) in s_show()