Lines matching refs:resv: references to 'resv' (a struct resv_map *, the hugetlb reservation map) in mm/hugetlb.c.
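
The functions below all manipulate a struct resv_map. Judging from the fields the references touch (lock, regions, adds_in_progress, region_cache, region_cache_count, pages_per_hpage, refs), the two data structures look roughly like the sketch below; this is a simplification, and the real definitions in the kernel's hugetlb code carry extra cgroup fields and vary by version.

struct file_region {
        struct list_head link;          /* entry on ->regions or ->region_cache */
        long from;                      /* first huge page index covered */
        long to;                        /* one past the last index covered */
        /* per-region hugetlb cgroup uncharge info lives here when enabled */
};

struct resv_map {
        struct kref refs;               /* shared by VMAs that duplicate the map */
        spinlock_t lock;                /* protects everything below */
        struct list_head regions;       /* sorted list of reserved [from, to) ranges */
        long adds_in_progress;          /* region_chg()s not yet added or aborted */
        struct list_head region_cache;  /* preallocated entries, usable under lock */
        long region_cache_count;
        unsigned long pages_per_hpage;  /* set lazily, used for cgroup uncharging */
};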

249 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)  in get_file_region_entry_from_cache()  argument
253 VM_BUG_ON(resv->region_cache_count <= 0); in get_file_region_entry_from_cache()
255 resv->region_cache_count--; in get_file_region_entry_from_cache()
256 nrg = list_first_entry(&resv->region_cache, struct file_region, link); in get_file_region_entry_from_cache()
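
The references above cover most of this helper: it pops a preallocated file_region from resv->region_cache (which must be non-empty, hence the VM_BUG_ON) and stamps it with the requested range. A plausible reconstruction; the list_del() and the from/to assignments are inferred from context rather than shown in the listing:

static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
        struct file_region *nrg;

        VM_BUG_ON(resv->region_cache_count <= 0);

        resv->region_cache_count--;
        nrg = list_first_entry(&resv->region_cache, struct file_region, link);
        list_del(&nrg->link);

        nrg->from = from;
        nrg->to = to;

        return nrg;
}
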
279 struct resv_map *resv, in record_hugetlb_cgroup_uncharge_info() argument
298 if (!resv->pages_per_hpage) in record_hugetlb_cgroup_uncharge_info()
299 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
303 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
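
Lines 298-303 show resv->pages_per_hpage being latched from pages_per_huge_page(h) the first time a region is recorded against a cgroup, and then asserted to stay constant. A hedged sketch of that part; stashing the cgroup pointers on the new file_region (so region_del() can uncharge it later) is only summarized in a comment:

static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
                                                struct hstate *h,
                                                struct resv_map *resv,
                                                struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
        if (h_cg) {
                /* ... point nrg at h_cg's reservation counter and css ... */

                /* Latch the huge page size for later uncharge arithmetic. */
                if (!resv->pages_per_hpage)
                        resv->pages_per_hpage = pages_per_huge_page(h);
                /* Every entry in one resv_map describes the same hstate. */
                VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
        }
#endif
}
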
332 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) in coalesce_file_region() argument
337 if (&prg->link != &resv->regions && prg->to == rg->from && in coalesce_file_region()
349 if (&nrg->link != &resv->regions && nrg->from == rg->to && in coalesce_file_region()
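
The two truncated conditions at lines 337 and 349 test the previous and next list entries: if a neighbour is adjacent (prg->to == rg->from, or nrg->from == rg->to) and, in the full condition, carries the same cgroup uncharge info, the two entries are merged and the redundant one freed. A simplified sketch with the cgroup comparison and reference handling reduced to comments:

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
        struct file_region *nrg, *prg;

        prg = list_prev_entry(rg, link);
        if (&prg->link != &resv->regions && prg->to == rg->from
            /* && same cgroup uncharge info */) {
                prg->to = rg->to;       /* grow the previous entry over rg */
                list_del(&rg->link);
                kfree(rg);              /* real code also drops rg's cgroup refs */
                rg = prg;
        }

        nrg = list_next_entry(rg, link);
        if (&nrg->link != &resv->regions && nrg->from == rg->to
            /* && same cgroup uncharge info */) {
                nrg->from = rg->from;   /* grow the next entry backwards over rg */
                list_del(&rg->link);
                kfree(rg);
        }
}
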
367 static long add_reservation_in_range(struct resv_map *resv, long f, long t, in add_reservation_in_range() argument
372 struct list_head *head = &resv->regions; in add_reservation_in_range()
407 resv, last_accounted_offset, rg->from); in add_reservation_in_range()
409 resv, nrg); in add_reservation_in_range()
411 coalesce_file_region(resv, nrg); in add_reservation_in_range()
426 resv, last_accounted_offset, t); in add_reservation_in_range()
427 record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg); in add_reservation_in_range()
429 coalesce_file_region(resv, nrg); in add_reservation_in_range()
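
add_reservation_in_range() does double duty: with regions_needed == NULL it actually inserts entries for every gap in [f, t) not already covered by resv->regions, pulling each entry from the cache, recording cgroup uncharge info, and coalescing; with regions_needed non-NULL it only counts how many entries that would take. The sketch below is a simplified reimplementation of that walk (the insert_after bookkeeping replaces the kernel's own insertion-point handling), not the kernel code verbatim:

static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                     struct hugetlb_cgroup *h_cg,
                                     struct hstate *h, long *regions_needed)
{
        long add = 0, last_accounted_offset = f;
        struct list_head *head = &resv->regions;
        struct list_head *insert_after = head;  /* where a new entry goes */
        struct file_region *rg, *nrg;

        if (regions_needed)
                *regions_needed = 0;

        /* Walk the sorted list; every uncovered gap inside [f, t) needs an entry. */
        list_for_each_entry(rg, head, link) {
                if (rg->to <= f) {
                        insert_after = &rg->link;
                        continue;
                }
                if (rg->from >= t)
                        break;

                if (rg->from > last_accounted_offset) {
                        add += rg->from - last_accounted_offset;
                        if (!regions_needed) {
                                nrg = get_file_region_entry_from_cache(resv,
                                                last_accounted_offset, rg->from);
                                record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                                list_add(&nrg->link, insert_after);
                                coalesce_file_region(resv, nrg);
                        } else {
                                *regions_needed += 1;
                        }
                }
                last_accounted_offset = rg->to;
                insert_after = &rg->link;
        }

        /* Trailing gap between the last covered offset and t (lines 426-429). */
        if (last_accounted_offset < t) {
                add += t - last_accounted_offset;
                if (!regions_needed) {
                        nrg = get_file_region_entry_from_cache(resv,
                                        last_accounted_offset, t);
                        record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                        list_add(&nrg->link, insert_after);
                        coalesce_file_region(resv, nrg);
                } else {
                        *regions_needed += 1;
                }
        }

        return add;
}
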
440 static int allocate_file_region_entries(struct resv_map *resv, in allocate_file_region_entries() argument
442 __must_hold(&resv->lock) in allocate_file_region_entries()
461 while (resv->region_cache_count < in allocate_file_region_entries()
462 (resv->adds_in_progress + regions_needed)) { in allocate_file_region_entries()
463 to_allocate = resv->adds_in_progress + regions_needed - in allocate_file_region_entries()
464 resv->region_cache_count; in allocate_file_region_entries()
470 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); in allocate_file_region_entries()
472 spin_unlock(&resv->lock); in allocate_file_region_entries()
480 spin_lock(&resv->lock); in allocate_file_region_entries()
482 list_splice(&allocated_regions, &resv->region_cache); in allocate_file_region_entries()
483 resv->region_cache_count += to_allocate; in allocate_file_region_entries()
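
Lines 461-483 show the refill loop: while resv->region_cache holds fewer entries than adds_in_progress plus the new demand, the spinlock is dropped, the shortfall is kmalloc'ed, and the new entries are spliced into the cache once the lock is re-taken. A hedged sketch; the failure-path locking is simplified here so that the __must_hold annotation stays true:

static int allocate_file_region_entries(struct resv_map *resv,
                                        int regions_needed)
        __must_hold(&resv->lock)
{
        LIST_HEAD(allocated_regions);
        int to_allocate, i;
        struct file_region *trg, *rg;

        /*
         * Keep enough cached entries around that every prepared add
         * (adds_in_progress) plus this request can complete without
         * allocating while the lock is held.
         */
        while (resv->region_cache_count <
               (resv->adds_in_progress + regions_needed)) {
                to_allocate = resv->adds_in_progress + regions_needed -
                              resv->region_cache_count;

                VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

                spin_unlock(&resv->lock);
                for (i = 0; i < to_allocate; i++) {
                        trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                        if (!trg)
                                goto out_of_memory;
                        list_add(&trg->link, &allocated_regions);
                }
                spin_lock(&resv->lock);

                list_splice_init(&allocated_regions, &resv->region_cache);
                resv->region_cache_count += to_allocate;
        }

        return 0;

out_of_memory:
        list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
                list_del(&rg->link);
                kfree(rg);
        }
        spin_lock(&resv->lock);         /* simplified: re-take for the caller */
        return -ENOMEM;
}
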
513 static long region_add(struct resv_map *resv, long f, long t, in region_add() argument
519 spin_lock(&resv->lock); in region_add()
523 add_reservation_in_range(resv, f, t, NULL, NULL, in region_add()
536 resv->region_cache_count < in region_add()
537 resv->adds_in_progress + in region_add()
545 resv, actual_regions_needed - in_regions_needed)) { in region_add()
552 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
554 resv->adds_in_progress -= in_regions_needed; in region_add()
556 spin_unlock(&resv->lock); in region_add()
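
region_add() commits a range that region_chg() examined earlier. The references show its retry logic: under resv->lock it re-counts how many entries the add needs now (the map may have changed while the lock was dropped), tops up the cache if that exceeds the earlier estimate, then performs the real add and retires in_regions_needed from adds_in_progress. A condensed sketch consistent with the helper sketches above:

static long region_add(struct resv_map *resv, long f, long t,
                       long in_regions_needed, struct hstate *h,
                       struct hugetlb_cgroup *h_cg)
{
        long add = 0, actual_regions_needed = 0;

        spin_lock(&resv->lock);
retry:
        /* Counting pass: how many new entries would this add need today? */
        add_reservation_in_range(resv, f, t, NULL, NULL,
                                 &actual_regions_needed);

        /* Lines 536-545: top up the cache if region_chg() under-estimated. */
        if (actual_regions_needed > in_regions_needed &&
            resv->region_cache_count <
                    resv->adds_in_progress +
                            (actual_regions_needed - in_regions_needed)) {
                if (allocate_file_region_entries(resv,
                                actual_regions_needed - in_regions_needed)) {
                        spin_unlock(&resv->lock);
                        return -ENOMEM;
                }
                goto retry;
        }

        /* Real pass: consume cache entries and update the map. */
        add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

        resv->adds_in_progress -= in_regions_needed;

        spin_unlock(&resv->lock);
        return add;
}
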
581 static long region_chg(struct resv_map *resv, long f, long t, in region_chg() argument
586 spin_lock(&resv->lock); in region_chg()
589 chg = add_reservation_in_range(resv, f, t, NULL, NULL, in region_chg()
595 if (allocate_file_region_entries(resv, *out_regions_needed)) in region_chg()
598 resv->adds_in_progress += *out_regions_needed; in region_chg()
600 spin_unlock(&resv->lock); in region_chg()
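
region_chg() is the preparation half of the protocol: it only counts the pages of [f, t) not yet represented in the map, preallocates enough file_region entries so the later region_add() cannot fail for lack of memory, and records the pending work in adds_in_progress. A hedged sketch (error handling simplified to match the allocate_file_region_entries() sketch above):

static long region_chg(struct resv_map *resv, long f, long t,
                       long *out_regions_needed)
{
        long chg;

        spin_lock(&resv->lock);

        /* Counting-only pass: nothing is inserted into the map yet. */
        chg = add_reservation_in_range(resv, f, t, NULL, NULL,
                                       out_regions_needed);

        if (*out_regions_needed == 0)
                *out_regions_needed = 1;

        if (allocate_file_region_entries(resv, *out_regions_needed)) {
                spin_unlock(&resv->lock);
                return -ENOMEM;
        }

        resv->adds_in_progress += *out_regions_needed;

        spin_unlock(&resv->lock);
        return chg;
}
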
617 static void region_abort(struct resv_map *resv, long f, long t, in region_abort() argument
620 spin_lock(&resv->lock); in region_abort()
621 VM_BUG_ON(!resv->region_cache_count); in region_abort()
622 resv->adds_in_progress -= regions_needed; in region_abort()
623 spin_unlock(&resv->lock); in region_abort()
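
region_abort() cancels work prepared by region_chg(): it only hands back the adds_in_progress slots; the preallocated entries simply stay in region_cache for later use (hence the VM_BUG_ON that the cache is not empty). The listing covers essentially the whole body:

static void region_abort(struct resv_map *resv, long f, long t,
                         long regions_needed)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress -= regions_needed;
        spin_unlock(&resv->lock);
}
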
640 static long region_del(struct resv_map *resv, long f, long t) in region_del() argument
642 struct list_head *head = &resv->regions; in region_del()
648 spin_lock(&resv->lock); in region_del()
669 resv->region_cache_count > resv->adds_in_progress) { in region_del()
670 nrg = list_first_entry(&resv->region_cache, in region_del()
674 resv->region_cache_count--; in region_del()
678 spin_unlock(&resv->lock); in region_del()
687 resv, rg, t - f, false); in region_del()
707 hugetlb_cgroup_uncharge_file_region(resv, rg, in region_del()
715 hugetlb_cgroup_uncharge_file_region(resv, rg, in region_del()
721 hugetlb_cgroup_uncharge_file_region(resv, rg, in region_del()
729 spin_unlock(&resv->lock); in region_del()
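
region_del() removes [f, t) from the map. The references show the interesting cases: when [f, t) falls strictly inside one entry, that entry must be split in two, which consumes a spare entry from region_cache (lines 669-674); if the cache has nothing beyond what adds_in_progress has claimed, the real code drops the lock, allocates, and retries (line 678). The uncharge calls at lines 687, 707, 715 and 721 correspond to the split, full-removal and the two trim cases. A condensed sketch that leaves out the retry path and the cgroup-info copy on split; the final bool passed to hugetlb_cgroup_uncharge_file_region() (whether the entry itself goes away) is inferred except for the explicit false at line 687:

static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg, *nrg;
        long del = 0;

        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) {
                        /* [f, t) punches a hole in rg: split it in two. */
                        nrg = list_first_entry(&resv->region_cache,
                                               struct file_region, link);
                        list_del(&nrg->link);
                        resv->region_cache_count--;

                        hugetlb_cgroup_uncharge_file_region(resv, rg, t - f, false);
                        del += t - f;

                        nrg->from = t;          /* new entry keeps the tail */
                        nrg->to = rg->to;
                        rg->to = f;             /* old entry keeps the head */
                        list_add(&nrg->link, &rg->link);
                        break;
                }

                if (f <= rg->from && t >= rg->to) {
                        /* Entry lies entirely inside [f, t): drop it. */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                        rg->to - rg->from, true);
                        del += rg->to - rg->from;
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {            /* trim the front of the entry */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                        t - rg->from, false);
                        del += t - rg->from;
                        rg->from = t;
                } else {                        /* trim the tail of the entry */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
                                        rg->to - f, false);
                        del += rg->to - f;
                        rg->to = f;
                }
        }
        spin_unlock(&resv->lock);

        return del;
}
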
767 static long region_count(struct resv_map *resv, long f, long t) in region_count() argument
769 struct list_head *head = &resv->regions; in region_count()
773 spin_lock(&resv->lock); in region_count()
789 spin_unlock(&resv->lock); in region_count()
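
region_count() is read-only: under resv->lock it walks the sorted list and sums how many pages of [f, t) each overlapping entry covers. A small sketch:

static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        list_for_each_entry(rg, head, link) {
                long seg_from, seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                /* Clamp the entry to [f, t) and count the overlap. */
                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);
                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}
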
2182 struct resv_map *resv; in __vma_reservation_common() local
2187 resv = vma_resv_map(vma); in __vma_reservation_common()
2188 if (!resv) in __vma_reservation_common()
2194 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); in __vma_reservation_common()
2202 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); in __vma_reservation_common()
2207 region_abort(resv, idx, idx + 1, 1); in __vma_reservation_common()
2212 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); in __vma_reservation_common()
2216 region_abort(resv, idx, idx + 1, 1); in __vma_reservation_common()
2217 ret = region_del(resv, idx, idx + 1); in __vma_reservation_common()
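
These references show how the per-page reservation helpers drive the region API as a prepare/commit/abort protocol: region_chg() when a reservation is being prepared, region_add() to commit it, region_abort() to cancel a prepared add, and region_abort() followed by region_del() when a previously added entry has to come out again. A sketch of just that dispatch inside __vma_reservation_common(vma, addr, mode); the mode names follow the kernel's enum vma_resv_mode, and the surrounding private-mapping bookkeeping and return-value massaging are omitted:

        struct hstate *h = hstate_vma(vma);
        struct resv_map *resv = vma_resv_map(vma);
        pgoff_t idx = vma_hugecache_offset(h, vma, addr);
        long ret, dummy_out_regions_needed;

        if (!resv)
                return 1;       /* no reservation map for this VMA */

        switch (mode) {
        case VMA_NEEDS_RESV:
                /* Prepare: count and preallocate; commit or abort later. */
                ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
                break;
        case VMA_COMMIT_RESV:
                ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
                break;
        case VMA_END_RESV:
                /* The prepared add is not wanted after all. */
                region_abort(resv, idx, idx + 1, 1);
                ret = 0;
                break;
        case VMA_ADD_RESV:
                if (vma->vm_flags & VM_MAYSHARE) {
                        ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
                } else {
                        region_abort(resv, idx, idx + 1, 1);
                        ret = region_del(resv, idx, idx + 1);
                }
                break;
        default:
                BUG();
        }
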
3653 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open() local
3663 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { in hugetlb_vm_op_open()
3664 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); in hugetlb_vm_op_open()
3665 kref_get(&resv->refs); in hugetlb_vm_op_open()
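
hugetlb_vm_op_open() runs when a VMA is duplicated (fork, split). For a private mapping that owns its reservations (HPAGE_RESV_OWNER), the copy keeps the same resv_map alive by duplicating the cgroup uncharge bookkeeping and taking another reference. The two referenced lines are the heart of it:

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
        struct resv_map *resv = vma_resv_map(vma);

        if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
                kref_get(&resv->refs);
        }
}
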
3672 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close() local
3677 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
3683 reserve = (end - start) - region_count(resv, start, end); in hugetlb_vm_op_close()
3684 hugetlb_cgroup_uncharge_counter(resv, start, end); in hugetlb_vm_op_close()
3694 kref_put(&resv->refs, resv_map_release); in hugetlb_vm_op_close()
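
hugetlb_vm_op_close() is the teardown counterpart: for the owning VMA it computes how many reserved pages were never consumed, as (end - start) minus what region_count() finds in the map, uncharges the cgroup reservation counter for the range, and drops the map reference. A sketch of the referenced part; returning the unused reservation to the subpool and global pool is abbreviated to a comment:

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
        struct hstate *h = hstate_vma(vma);
        struct resv_map *resv = vma_resv_map(vma);
        unsigned long reserve, start, end;

        if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return;

        start = vma_hugecache_offset(h, vma, vma->vm_start);
        end = vma_hugecache_offset(h, vma, vma->vm_end);

        /* Pages reserved for this range but never faulted in. */
        reserve = (end - start) - region_count(resv, start, end);

        /* Give back the cgroup reservation charges for the whole range. */
        hugetlb_cgroup_uncharge_counter(resv, start, end);

        /* ... return 'reserve' unused pages to the subpool / global pool ... */

        kref_put(&resv->refs, resv_map_release);
}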