Lines Matching full:area

158  * unmap_kernel_range_noflush - unmap kernel VM area
159 * @start: start of the VM area to unmap
160 * @size: size of the VM area to unmap
162 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify
276 * map_kernel_range_noflush - map kernel VM area with the specified pages
277 * @addr: start of the VM area to map
278 * @size: size of the VM area to map
282 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify should
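
These two _noflush helpers only touch the page tables; all cache and TLB flushing is left to the caller. Below is a minimal, hedged sketch of how they pair up. The example_map_unmap() helper, the pre-reserved area and the pages array are assumptions for illustration, not code from this file.

/* Hedged sketch: map then unmap a pre-reserved kernel VM area.
 * Assumes `area` came from get_vm_area() and `pages` holds enough
 * struct page pointers to cover `size`; error handling is minimal.
 */
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static int example_map_unmap(struct vm_struct *area, struct page **pages,
			     unsigned long size)
{
	unsigned long addr = (unsigned long)area->addr;
	int ret;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + size);		/* caller flushes */

	/* ... use the mapping ... */

	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* caller flushes */
	return 0;
}

The non-_noflush variants shown further down in this listing perform both flushes internally.
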
444 * find the lowest match of a free area.
737 * free area is inserted. If VA has been merged, it is
785 /* Point to the new merged area. */ in merge_or_add_vmap_area()
816 /* Point to the new merged area. */ in merge_or_add_vmap_area()
1086 * Returns a start address of the newly allocated area, if success.
1185 * when the fit type of the free area is NE_FIT_TYPE. Please note, it in alloc_vmap_area()
1367 * Finally insert or merge lazily-freed area. It is in __purge_vmap_area_lazy()
1414 * Free a vmap area, caller ensuring that the area has been unmapped
1437 * Free and unmap a vmap area
1884 * vm_area_add_early - add vmap area early during boot
1887 * This function is used to add a fixed kernel vm area to vmlist before
1910 * vm_area_register_early - register vmap area early during boot
1914 * This function is used to register a kernel vm area before
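
Both early-boot helpers take a caller-owned, statically allocated vm_struct, since the slab allocator is not available yet. A hedged sketch of the registration pattern follows; the early_vm object, its size and reserve_early_area() are invented for illustration.

#include <linux/init.h>
#include <linux/vmalloc.h>

static struct vm_struct early_vm;	/* static: kmalloc() is not up yet */

static void __init reserve_early_area(void)
{
	early_vm.flags = VM_ALLOC;
	early_vm.size  = 4 * PAGE_SIZE;

	/* Picks a virtual address and links the area into vmlist. */
	vm_area_register_early(&early_vm, PAGE_SIZE);

	/* early_vm.addr now holds the address chosen for the area. */
}

vm_area_add_early() is the lower-level variant for callers that have already chosen early_vm.addr themselves.
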
2018 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2019 * @addr: start of the VM area to unmap
2020 * @size: size of the VM area to unmap
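
Unlike the _noflush variant documented near the top of this listing, unmap_kernel_range() performs the cache and TLB flushes itself. A hedged one-liner wrapped in a helper; the address range is assumed to have been mapped with map_kernel_range().

/* Hedged sketch: tear down a kernel mapping, flushing cache and TLB. */
static void example_teardown(unsigned long addr, unsigned long size)
{
	unmap_kernel_range(addr, size);
}
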
2069 struct vm_struct *area; in __get_vm_area_node() local
2081 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2082 if (unlikely(!area)) in __get_vm_area_node()
2090 kfree(area); in __get_vm_area_node()
2096 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2098 return area; in __get_vm_area_node()
2111 * get_vm_area - reserve a contiguous kernel virtual area
2112 * @size: size of the area
2115 * Search an area of @size in the kernel virtual mapping area,
2116 * and reserve it for our purposes. Returns the area descriptor
2119 * Return: the area descriptor on success or %NULL on failure.
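
get_vm_area() only reserves virtual address space; nothing is mapped until the caller does so, and free_vm_area() gives the range back. A hedged sketch with the mapping step elided; reserve_and_release() is an invented name.

#include <linux/vmalloc.h>

static void reserve_and_release(void)
{
	struct vm_struct *area;

	area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return;

	/* ... map something into [area->addr, area->addr + area->size) ... */

	free_vm_area(area);	/* unmaps anything present and frees the descriptor */
}
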
2136 * find_vm_area - find a contiguous kernel virtual area
2139 * Search for the kernel VM area starting at @addr, and return it.
2143 * Return: the area descriptor on success or %NULL on failure.
2157 * remove_vm_area - find and remove a contiguous kernel virtual area
2160 * Search for the kernel VM area starting at @addr, and remove it.
2161 * This function returns the found VM area, but using it is NOT safe
2164 * Return: the area descriptor on success or %NULL on failure.
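
find_vm_area() only looks the descriptor up; remove_vm_area() also detaches it, leaving the kfree() to the caller (free_vm_area(), further down, wraps exactly that pair). A hedged lookup example; area_size_of() and the assumption that p came from vmalloc() are illustrative only.

/* Hedged sketch: report the size of the vm area backing a vmalloc'ed
 * pointer, or 0 if none is found.
 */
static unsigned long area_size_of(const void *p)
{
	struct vm_struct *area = find_vm_area(p);

	return area ? area->size : 0;
}

Note that vm_struct.size includes the guard page unless the area was created with VM_NO_GUARD.
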
2191 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2196 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2197 if (page_address(area->pages[i])) in set_area_direct_map()
2198 set_direct_map(area->pages[i]); in set_area_direct_map()
2202 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) in vm_remove_mappings() argument
2205 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2209 remove_vm_area(area->addr); in vm_remove_mappings()
2216 * If not deallocating pages, just do the flush of the VM area and in vm_remove_mappings()
2229 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2230 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2243 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_remove_mappings()
2245 set_area_direct_map(area, set_direct_map_default_noflush); in vm_remove_mappings()
2250 struct vm_struct *area; in __vunmap() local
2259 area = find_vm_area(addr); in __vunmap()
2260 if (unlikely(!area)) { in __vunmap()
2261 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
2266 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2267 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2269 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2271 vm_remove_mappings(area, deallocate_pages); in __vunmap()
2276 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2277 struct page *page = area->pages[i]; in __vunmap()
2282 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2284 kvfree(area->pages); in __vunmap()
2287 kfree(area); in __vunmap()
2335 * Free the virtually contiguous memory area starting at @addr, as obtained
2367 * Free the virtually contiguous memory area starting at @addr,
2394 * Return: the address of the area or %NULL on failure
2399 struct vm_struct *area; in vmap() local
2408 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2409 if (!area) in vmap()
2412 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), in vmap()
2414 vunmap(area->addr); in vmap()
2419 area->pages = pages; in vmap()
2420 area->nr_pages = count; in vmap()
2422 return area->addr; in vmap()
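
vmap() stitches an already-allocated page array into one virtually contiguous kernel mapping; as the code above shows, it only records pages/nr_pages, so the pages stay owned by the caller. A hedged sketch; vmap_two_pages() is invented and error handling is minimal.

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void vmap_two_pages(void)
{
	struct page *pages[2];
	void *va;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		goto out;

	va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (va) {
		memset(va, 0, 2 * PAGE_SIZE);	/* now one contiguous range */
		vunmap(va);			/* unmaps; does not free the pages */
	}
out:
	if (pages[0])
		__free_page(pages[0]);
	if (pages[1])
		__free_page(pages[1]);
}
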
2455 struct vm_struct *area; in vmap_pfn() local
2457 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2459 if (!area) in vmap_pfn()
2461 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2463 free_vm_area(area); in vmap_pfn()
2466 return area->addr; in vmap_pfn()
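
vmap_pfn() is the PFN-based sibling of vmap(), meant for memory with no struct page backing, which is why the area above is created with VM_IOREMAP and populated through apply_to_page_range(). A hedged sketch; map_device_pfns() and the use of pgprot_noncached() are assumptions for illustration.

/* Hedged sketch: map `count` PFNs (e.g. from a device aperture that has
 * no struct page backing) into contiguous kernel virtual memory.
 */
static void *map_device_pfns(unsigned long *pfns, unsigned int count)
{
	return vmap_pfn(pfns, count, pgprot_noncached(PAGE_KERNEL));
}

The resulting mapping is torn down with vunmap().
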
2471 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
2475 unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
2486 area->caller); in __vmalloc_area_node()
2492 remove_vm_area(area->addr); in __vmalloc_area_node()
2493 kfree(area); in __vmalloc_area_node()
2497 area->pages = pages; in __vmalloc_area_node()
2498 area->nr_pages = nr_pages; in __vmalloc_area_node()
2500 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2510 area->nr_pages = i; in __vmalloc_area_node()
2511 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2514 area->pages[i] = page; in __vmalloc_area_node()
2518 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2520 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), in __vmalloc_area_node()
2524 return area->addr; in __vmalloc_area_node()
2529 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
2530 __vfree(area->addr); in __vmalloc_area_node()
2538 * @start: vm area range start
2539 * @end: vm area range end
2542 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2550 * Return: the address of the area or %NULL on failure
2557 struct vm_struct *area; in __vmalloc_node_range() local
2565 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED | in __vmalloc_node_range()
2567 if (!area) in __vmalloc_node_range()
2570 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
2579 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
2581 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
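
__vmalloc_node_range() is the workhorse behind vmalloc() and friends: it reserves a VM area inside [@start, @end) and then populates it page by page via __vmalloc_area_node() above. A hedged sketch of a caller in the style of an arch module_alloc(); MODULES_VADDR, MODULES_END and PAGE_KERNEL_EXEC are arch-specific and used here only for illustration.

/* Hedged sketch: allocate executable memory from the module VA range,
 * roughly the way arch module_alloc() implementations call this helper.
 */
static void *example_module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				    NUMA_NO_NODE,
				    __builtin_return_address(0));
}
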
2675 * The resulting memory area is zeroed so it can be mapped to userspace
2759 * The resulting memory area is 32bit addressable and zeroed so it can be
2792 * To do safe access to this _mapped_ area, we need in aligned_vread()
2831 * To do safe access to this _mapped_ area, we need in aligned_vwrite()
2855 * vread() - read vmalloc area in a safe way.
2860 * This function checks that addr is a valid vmalloc'ed area, and
2861 * copies data from that area to a given buffer. If the given memory range
2863 * proper area of @buf. If there are memory holes, they'll be zero-filled.
2864 * An IOREMAP area is treated as a memory hole and no copy is done.
2867 * vm_struct area, returns 0. @buf should be a kernel buffer.
2870 * should know vmalloc() area is valid and can use memcpy().
2871 * This is for routines which have to access vmalloc area without
2876 * include any intersection with valid vmalloc area
2915 else /* IOREMAP area is treated as memory hole */ in vread()
2934 * vwrite() - write vmalloc area in a safe way.
2939 * This function checks that addr is a valid vmalloc'ed area, and
2942 * proper area of @buf. If there are memory holes, no copy is done for them.
2943 * An IOREMAP area is treated as a memory hole and no copy is done.
2946 * vm_struct area, returns 0. @buf should be a kernel buffer.
2949 * should know vmalloc() area is valid and can use memcpy().
2950 * This is for routines which have to access vmalloc area without
2955 * doesn't include any intersection with valid vmalloc area
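
Both vread() and vwrite() tolerate holes and ranges that disappear underneath them, which is what makes them usable from /proc/kcore-style readers. A hedged sketch of a vread() call; peek_vmalloc() is an invented helper and vmalloc_addr is assumed to point into vmalloc space.

/* Hedged sketch: safely copy from a possibly-vanishing vmalloc range.
 * Returns the number of bytes placed in `buf`; holes come back zeroed.
 */
static long peek_vmalloc(char *buf, char *vmalloc_addr, unsigned long count)
{
	long copied = vread(buf, vmalloc_addr, count);

	if (!copied)
		pr_debug("range does not intersect any live vm_struct\n");
	return copied;
}
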
3013 * @size: size of map area
3017 * This function checks that @kaddr is a valid vmalloc'ed area,
3028 struct vm_struct *area; in remap_vmalloc_range_partial() local
3040 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3041 if (!area) in remap_vmalloc_range_partial()
3044 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3048 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
3079 * This function checks that addr is a valid vmalloc'ed area, and
3094 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3097 ret = remove_vm_area(area->addr); in free_vm_area()
3098 BUG_ON(ret != area); in free_vm_area()
3099 kfree(area); in free_vm_area()
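
remap_vmalloc_range() (and the _partial variant above) is the supported way to expose a vmalloc_user() buffer to userspace; the VM_USERMAP/VM_DMA_COHERENT check shown above is why plain vmalloc() memory is rejected. A hedged sketch of a driver ->mmap handler; my_buf and example_mmap() are invented, and my_buf is assumed to have been allocated with vmalloc_user().

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;	/* assumed: vmalloc_user(BUF_SIZE) at init time */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Fails with -EINVAL unless the backing area carries VM_USERMAP,
	 * i.e. it came from vmalloc_user() or a DMA-coherent allocation.
	 */
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}
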
3113 * Returns: vmap_area if it is found. If there is no such area
3172 * @offsets: array containing offset of each area
3173 * @sizes: array containing size of each area
3190 * base address is pulled down to fit the area. Scanning is repeated until
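
pcpu_get_vm_areas() exists for the percpu allocator alone: it carves out nr_vms congruent areas, each at its given offset from one common base, scanning downward from the top of the vmalloc range as described above. A hedged sketch of the call shape only; the offsets, sizes and grab_percpu_areas() are invented.

/* Hedged sketch: request two congruent areas, 2 MB apart, one page each. */
static struct vm_struct **grab_percpu_areas(void)
{
	static const unsigned long offsets[] = { 0, 2UL << 20 };
	static const size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };

	return pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
}

On failure it returns NULL; the matching teardown is pcpu_free_vm_areas().
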
3202 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3209 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3210 start = offsets[area]; in pcpu_get_vm_areas()
3211 end = start + sizes[area]; in pcpu_get_vm_areas()
3214 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3215 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3217 /* detect the area with the highest address */ in pcpu_get_vm_areas()
3219 last_area = area; in pcpu_get_vm_areas()
3221 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3240 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3241 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3242 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3243 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3249 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
3250 area = term_area = last_area; in pcpu_get_vm_areas()
3251 start = offsets[area]; in pcpu_get_vm_areas()
3252 end = start + sizes[area]; in pcpu_get_vm_areas()
3277 term_area = area; in pcpu_get_vm_areas()
3287 term_area = area; in pcpu_get_vm_areas()
3292 * This area fits, move on to the previous one. If in pcpu_get_vm_areas()
3295 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3296 if (area == term_area) in pcpu_get_vm_areas()
3299 start = offsets[area]; in pcpu_get_vm_areas()
3300 end = start + sizes[area]; in pcpu_get_vm_areas()
3305 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3308 start = base + offsets[area]; in pcpu_get_vm_areas()
3309 size = sizes[area]; in pcpu_get_vm_areas()
3325 /* Allocated area. */ in pcpu_get_vm_areas()
3326 va = vas[area]; in pcpu_get_vm_areas()
3334 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3335 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3338 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3339 sizes[area]); in pcpu_get_vm_areas()
3344 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3345 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3347 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3362 while (area--) { in pcpu_get_vm_areas()
3363 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3364 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3365 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3370 vas[area] = NULL; in pcpu_get_vm_areas()
3380 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3381 if (vas[area]) in pcpu_get_vm_areas()
3384 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
3386 if (!vas[area]) in pcpu_get_vm_areas()
3394 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3395 if (vas[area]) in pcpu_get_vm_areas()
3396 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
3398 kfree(vms[area]); in pcpu_get_vm_areas()
3412 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3413 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3414 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3415 va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3420 vas[area] = NULL; in pcpu_get_vm_areas()
3421 kfree(vms[area]); in pcpu_get_vm_areas()
3519 * of a vmap area being torn down or a vm_map_ram allocation. in s_show()