Lines matching refs: si — references to the struct swap_info_struct *si parameter/local (apparently mm/swapfile.c)
128 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
131 swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
172 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
180 se = first_se(si); in discard_swap()
184 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
195 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
242 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
245 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
259 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
352 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, in lock_cluster() argument
357 ci = si->cluster_info; in lock_cluster()
376 struct swap_info_struct *si, unsigned long offset) in lock_cluster_or_swap_info() argument
381 ci = lock_cluster(si, offset); in lock_cluster_or_swap_info()
384 spin_lock(&si->lock); in lock_cluster_or_swap_info()
389 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, in unlock_cluster_or_swap_info() argument
395 spin_unlock(&si->lock); in unlock_cluster_or_swap_info()
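The lock_cluster()/lock_cluster_or_swap_info()/unlock_cluster_or_swap_info() hits above show the locking pattern used throughout this file: take the fine-grained per-cluster lock when si->cluster_info exists (the SSD case), otherwise fall back to the coarse si->lock. A minimal userspace sketch of that fallback pattern, using pthread mutexes and hypothetical struct/function names rather than the kernel API:

    #include <pthread.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel structures (hypothetical). */
    struct cluster {
        pthread_mutex_t lock;
    };

    struct swap_dev {
        pthread_mutex_t lock;       /* coarse, device-wide lock          */
        struct cluster *clusters;   /* NULL when no per-cluster data     */
        unsigned long cluster_size; /* slots covered by one cluster      */
    };

    /* Lock the cluster covering @offset if per-cluster data exists,
     * otherwise fall back to the device-wide lock.  Returns the cluster
     * that was locked, or NULL when the fallback lock was taken. */
    static struct cluster *lock_cluster_or_dev(struct swap_dev *dev,
                                               unsigned long offset)
    {
        struct cluster *ci = NULL;

        if (dev->clusters) {
            ci = &dev->clusters[offset / dev->cluster_size];
            pthread_mutex_lock(&ci->lock);
        } else {
            pthread_mutex_lock(&dev->lock);
        }
        return ci;
    }

    static void unlock_cluster_or_dev(struct swap_dev *dev, struct cluster *ci)
    {
        if (ci)
            pthread_mutex_unlock(&ci->lock);
        else
            pthread_mutex_unlock(&dev->lock);
    }

    int main(void)
    {
        struct swap_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                .clusters = NULL, .cluster_size = 256 };
        struct cluster *ci = lock_cluster_or_dev(&dev, 1234);

        /* ... read or update per-slot state here ... */
        unlock_cluster_or_dev(&dev, ci);
        return 0;
    }

The point of the pattern is that callers never need to know which lock protects a given offset; they just keep the returned cookie and hand it back to the unlock helper.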
454 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
463 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_cluster_schedule_discard()
466 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); in swap_cluster_schedule_discard()
468 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
471 static void __free_cluster(struct swap_info_struct *si, unsigned long idx) in __free_cluster() argument
473 struct swap_cluster_info *ci = si->cluster_info; in __free_cluster()
476 cluster_list_add_tail(&si->free_clusters, ci, idx); in __free_cluster()
483 static void swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
488 info = si->cluster_info; in swap_do_scheduled_discard()
490 while (!cluster_list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
491 idx = cluster_list_del_first(&si->discard_clusters, info); in swap_do_scheduled_discard()
492 spin_unlock(&si->lock); in swap_do_scheduled_discard()
494 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
497 spin_lock(&si->lock); in swap_do_scheduled_discard()
498 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); in swap_do_scheduled_discard()
499 __free_cluster(si, idx); in swap_do_scheduled_discard()
500 memset(si->swap_map + idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
508 struct swap_info_struct *si; in swap_discard_work() local
510 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
512 spin_lock(&si->lock); in swap_discard_work()
513 swap_do_scheduled_discard(si); in swap_discard_work()
514 spin_unlock(&si->lock); in swap_discard_work()
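swap_discard_work() above recovers its swap_info_struct from the embedded work_struct with container_of() before taking si->lock and draining the discard list. A self-contained illustration of that container_of() idiom (userspace; the struct names here are made up):

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal container_of(), as found in the kernel headers. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };

    struct swap_dev {
        int id;
        struct work discard_work;   /* embedded member, like si->discard_work */
    };

    static void discard_work_fn(struct work *w)
    {
        /* Recover the enclosing object from the embedded member. */
        struct swap_dev *dev = container_of(w, struct swap_dev, discard_work);

        printf("discard work for swap device %d\n", dev->id);
    }

    int main(void)
    {
        struct swap_dev dev = { .id = 3 };

        discard_work_fn(&dev.discard_work);
        return 0;
    }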
517 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) in alloc_cluster() argument
519 struct swap_cluster_info *ci = si->cluster_info; in alloc_cluster()
521 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); in alloc_cluster()
522 cluster_list_del_first(&si->free_clusters, ci); in alloc_cluster()
526 static void free_cluster(struct swap_info_struct *si, unsigned long idx) in free_cluster() argument
528 struct swap_cluster_info *ci = si->cluster_info + idx; in free_cluster()
536 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
538 swap_cluster_schedule_discard(si, idx); in free_cluster()
542 __free_cluster(si, idx); in free_cluster()
590 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, in scan_swap_map_ssd_cluster_conflict() argument
597 conflict = !cluster_list_empty(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
598 offset != cluster_list_first(&si->free_clusters) && in scan_swap_map_ssd_cluster_conflict()
599 cluster_is_free(&si->cluster_info[offset]); in scan_swap_map_ssd_cluster_conflict()
604 percpu_cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_ssd_cluster_conflict()
613 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, in scan_swap_map_try_ssd_cluster() argument
621 cluster = this_cpu_ptr(si->percpu_cluster); in scan_swap_map_try_ssd_cluster()
623 if (!cluster_list_empty(&si->free_clusters)) { in scan_swap_map_try_ssd_cluster()
624 cluster->index = si->free_clusters.head; in scan_swap_map_try_ssd_cluster()
627 } else if (!cluster_list_empty(&si->discard_clusters)) { in scan_swap_map_try_ssd_cluster()
633 swap_do_scheduled_discard(si); in scan_swap_map_try_ssd_cluster()
634 *scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_try_ssd_cluster()
646 max = min_t(unsigned long, si->max, in scan_swap_map_try_ssd_cluster()
649 ci = lock_cluster(si, tmp); in scan_swap_map_try_ssd_cluster()
651 if (!si->swap_map[tmp]) in scan_swap_map_try_ssd_cluster()
682 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, in swap_range_alloc() argument
687 if (offset == si->lowest_bit) in swap_range_alloc()
688 si->lowest_bit += nr_entries; in swap_range_alloc()
689 if (end == si->highest_bit) in swap_range_alloc()
690 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); in swap_range_alloc()
691 si->inuse_pages += nr_entries; in swap_range_alloc()
692 if (si->inuse_pages == si->pages) { in swap_range_alloc()
693 si->lowest_bit = si->max; in swap_range_alloc()
694 si->highest_bit = 0; in swap_range_alloc()
695 del_from_avail_list(si); in swap_range_alloc()
711 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
719 if (offset < si->lowest_bit) in swap_range_free()
720 si->lowest_bit = offset; in swap_range_free()
721 if (end > si->highest_bit) { in swap_range_free()
722 bool was_full = !si->highest_bit; in swap_range_free()
724 WRITE_ONCE(si->highest_bit, end); in swap_range_free()
725 if (was_full && (si->flags & SWP_WRITEOK)) in swap_range_free()
726 add_to_avail_list(si); in swap_range_free()
728 trace_android_vh_account_swap_pages(si, &skip); in swap_range_free()
731 si->inuse_pages -= nr_entries; in swap_range_free()
732 if (si->flags & SWP_BLKDEV) in swap_range_free()
734 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
738 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
739 frontswap_invalidate_page(si->type, offset); in swap_range_free()
741 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
744 clear_shadow_from_swap_cache(si->type, begin, end); in swap_range_free()
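swap_range_alloc() and swap_range_free() above keep si->lowest_bit/si->highest_bit as a shrinking/growing window of offsets that may still be free, and si->inuse_pages as the running total. A simplified, single-threaded model of just that bookkeeping (hypothetical names; the real functions also handle the avail list, frontswap/arch invalidation and the block-device notify hook):

    #include <stdio.h>

    struct swap_map {
        unsigned long lowest_bit;   /* lowest offset that may be free   */
        unsigned long highest_bit;  /* highest offset that may be free  */
        unsigned long max;          /* number of slots                  */
        unsigned long pages;        /* usable slots                     */
        unsigned long inuse_pages;  /* currently allocated slots        */
    };

    static void range_alloc(struct swap_map *m, unsigned long offset,
                            unsigned long nr)
    {
        unsigned long end = offset + nr - 1;

        if (offset == m->lowest_bit)
            m->lowest_bit += nr;
        if (end == m->highest_bit)
            m->highest_bit -= nr;
        m->inuse_pages += nr;
        if (m->inuse_pages == m->pages) {
            /* Device is full: collapse the window so scans stop early. */
            m->lowest_bit = m->max;
            m->highest_bit = 0;
        }
    }

    static void range_free(struct swap_map *m, unsigned long offset,
                           unsigned long nr)
    {
        unsigned long end = offset + nr - 1;

        if (offset < m->lowest_bit)
            m->lowest_bit = offset;
        if (end > m->highest_bit)
            m->highest_bit = end;
        m->inuse_pages -= nr;
    }

    int main(void)
    {
        struct swap_map m = { .lowest_bit = 1, .highest_bit = 9,
                              .max = 10, .pages = 9 };

        range_alloc(&m, 1, 4);
        printf("after alloc: lowest=%lu highest=%lu inuse=%lu\n",
               m.lowest_bit, m.highest_bit, m.inuse_pages);
        range_free(&m, 1, 4);
        printf("after free:  lowest=%lu highest=%lu inuse=%lu\n",
               m.lowest_bit, m.highest_bit, m.inuse_pages);
        return 0;
    }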
747 static void set_cluster_next(struct swap_info_struct *si, unsigned long next) in set_cluster_next() argument
751 if (!(si->flags & SWP_SOLIDSTATE)) { in set_cluster_next()
752 si->cluster_next = next; in set_cluster_next()
756 prev = this_cpu_read(*si->cluster_next_cpu); in set_cluster_next()
765 if (si->highest_bit <= si->lowest_bit) in set_cluster_next()
767 next = si->lowest_bit + in set_cluster_next()
768 prandom_u32_max(si->highest_bit - si->lowest_bit + 1); in set_cluster_next()
770 next = max_t(unsigned int, next, si->lowest_bit); in set_cluster_next()
772 this_cpu_write(*si->cluster_next_cpu, next); in set_cluster_next()
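set_cluster_next() above stores the hint directly for rotating media; on SSDs it keeps a per-CPU hint and, when the hint crosses into another swap address-space trunk, re-randomizes it inside [lowest_bit, highest_bit] with prandom_u32_max() to spread allocations. A rough userspace sketch of the re-randomize step only (rand() stands in for prandom_u32_max(); the trunk size and names are made up):

    #include <stdio.h>
    #include <stdlib.h>

    #define TRUNK_SHIFT 14                 /* stand-in for SWAP_ADDRESS_SPACE_SHIFT */
    #define TRUNK_PAGES (1UL << TRUNK_SHIFT)

    static unsigned long lowest_bit = 1, highest_bit = 100000;

    /* Re-randomize the "next allocation" hint when it crosses a trunk
     * boundary, roughly what set_cluster_next() does for SSDs. */
    static unsigned long pick_next(unsigned long prev, unsigned long next)
    {
        if ((prev >> TRUNK_SHIFT) != (next >> TRUNK_SHIFT)) {
            if (highest_bit <= lowest_bit)
                return prev;               /* nothing free, keep the old hint */
            next = lowest_bit + rand() % (highest_bit - lowest_bit + 1);
            next &= ~(TRUNK_PAGES - 1);    /* align down to a trunk */
            if (next < lowest_bit)
                next = lowest_bit;
        }
        return next;
    }

    int main(void)
    {
        srand(1);
        printf("hint: %lu\n", pick_next(16383, 16384));  /* crosses a trunk */
        printf("hint: %lu\n", pick_next(100, 101));      /* stays in a trunk */
        return 0;
    }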
775 int scan_swap_map_slots(struct swap_info_struct *si, in scan_swap_map_slots() argument
798 si->flags += SWP_SCANNING; in scan_swap_map_slots()
804 if (si->flags & SWP_SOLIDSTATE) in scan_swap_map_slots()
805 scan_base = this_cpu_read(*si->cluster_next_cpu); in scan_swap_map_slots()
807 scan_base = si->cluster_next; in scan_swap_map_slots()
811 if (si->cluster_info) { in scan_swap_map_slots()
812 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
814 } else if (unlikely(!si->cluster_nr--)) { in scan_swap_map_slots()
815 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { in scan_swap_map_slots()
816 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
820 spin_unlock(&si->lock); in scan_swap_map_slots()
828 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
832 for (; last_in_cluster <= si->highest_bit; offset++) { in scan_swap_map_slots()
833 if (si->swap_map[offset]) in scan_swap_map_slots()
836 spin_lock(&si->lock); in scan_swap_map_slots()
838 si->cluster_next = offset; in scan_swap_map_slots()
839 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
849 spin_lock(&si->lock); in scan_swap_map_slots()
850 si->cluster_nr = SWAPFILE_CLUSTER - 1; in scan_swap_map_slots()
854 if (si->cluster_info) { in scan_swap_map_slots()
855 while (scan_swap_map_ssd_cluster_conflict(si, offset)) { in scan_swap_map_slots()
859 if (!scan_swap_map_try_ssd_cluster(si, &offset, in scan_swap_map_slots()
864 if (!(si->flags & SWP_WRITEOK)) in scan_swap_map_slots()
866 if (!si->highest_bit) in scan_swap_map_slots()
868 if (offset > si->highest_bit) in scan_swap_map_slots()
869 scan_base = offset = si->lowest_bit; in scan_swap_map_slots()
871 ci = lock_cluster(si, offset); in scan_swap_map_slots()
873 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { in scan_swap_map_slots()
876 spin_unlock(&si->lock); in scan_swap_map_slots()
877 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in scan_swap_map_slots()
878 spin_lock(&si->lock); in scan_swap_map_slots()
885 if (si->swap_map[offset]) { in scan_swap_map_slots()
892 WRITE_ONCE(si->swap_map[offset], usage); in scan_swap_map_slots()
893 inc_cluster_info_page(si, si->cluster_info, offset); in scan_swap_map_slots()
896 swap_range_alloc(si, offset, 1); in scan_swap_map_slots()
897 slots[n_ret++] = swp_entry(si->type, offset); in scan_swap_map_slots()
900 if ((n_ret == nr) || (offset >= si->highest_bit)) in scan_swap_map_slots()
909 spin_unlock(&si->lock); in scan_swap_map_slots()
911 spin_lock(&si->lock); in scan_swap_map_slots()
916 if (si->cluster_info) { in scan_swap_map_slots()
917 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) in scan_swap_map_slots()
919 } else if (si->cluster_nr && !si->swap_map[++offset]) { in scan_swap_map_slots()
921 --si->cluster_nr; in scan_swap_map_slots()
936 scan_limit = si->highest_bit; in scan_swap_map_slots()
939 if (!si->swap_map[offset]) in scan_swap_map_slots()
945 set_cluster_next(si, offset + 1); in scan_swap_map_slots()
946 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
950 spin_unlock(&si->lock); in scan_swap_map_slots()
951 while (++offset <= READ_ONCE(si->highest_bit)) { in scan_swap_map_slots()
952 if (data_race(!si->swap_map[offset])) { in scan_swap_map_slots()
953 spin_lock(&si->lock); in scan_swap_map_slots()
957 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in scan_swap_map_slots()
958 spin_lock(&si->lock); in scan_swap_map_slots()
967 offset = si->lowest_bit; in scan_swap_map_slots()
969 if (data_race(!si->swap_map[offset])) { in scan_swap_map_slots()
970 spin_lock(&si->lock); in scan_swap_map_slots()
974 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in scan_swap_map_slots()
975 spin_lock(&si->lock); in scan_swap_map_slots()
985 spin_lock(&si->lock); in scan_swap_map_slots()
988 si->flags -= SWP_SCANNING; in scan_swap_map_slots()
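Most of the hits above are from scan_swap_map_slots(), which walks si->swap_map for free slots starting at the per-CPU (or per-device) next hint, wraps back to si->lowest_bit, and falls back to a byte-by-byte scan when the SSD cluster path fails. A stripped-down userspace model of the basic next-fit scan, with no clusters, locking or reclaim (names and sizes are invented):

    #include <stdio.h>

    #define NSLOTS 16

    /* 0 means free; non-zero means in use (a stand-in for si->swap_map). */
    static unsigned char swap_map[NSLOTS];
    static unsigned long cluster_next = 1;  /* next-fit hint */
    static unsigned long lowest_bit = 1, highest_bit = NSLOTS - 1;

    /* Return a free offset or 0 on failure (offset 0 is reserved, as in swap). */
    static unsigned long scan_one_slot(void)
    {
        unsigned long offset = cluster_next;
        unsigned long scanned;

        for (scanned = 0; scanned <= highest_bit - lowest_bit; scanned++) {
            if (offset > highest_bit)
                offset = lowest_bit;        /* wrap around */
            if (!swap_map[offset]) {
                swap_map[offset] = 1;
                cluster_next = offset + 1;
                return offset;
            }
            offset++;
        }
        return 0;
    }

    int main(void)
    {
        swap_map[1] = swap_map[2] = 1;      /* pretend some slots are busy */
        printf("got offset %lu\n", scan_one_slot());
        printf("got offset %lu\n", scan_one_slot());
        return 0;
    }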
993 int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) in swap_alloc_cluster() argument
1009 if (cluster_list_empty(&si->free_clusters)) in swap_alloc_cluster()
1012 idx = cluster_list_first(&si->free_clusters); in swap_alloc_cluster()
1014 ci = lock_cluster(si, offset); in swap_alloc_cluster()
1015 alloc_cluster(si, idx); in swap_alloc_cluster()
1018 map = si->swap_map + offset; in swap_alloc_cluster()
1022 swap_range_alloc(si, offset, SWAPFILE_CLUSTER); in swap_alloc_cluster()
1023 *slot = swp_entry(si->type, offset); in swap_alloc_cluster()
1029 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) in swap_free_cluster() argument
1034 ci = lock_cluster(si, offset); in swap_free_cluster()
1035 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); in swap_free_cluster()
1037 free_cluster(si, idx); in swap_free_cluster()
1039 swap_range_free(si, offset, SWAPFILE_CLUSTER); in swap_free_cluster()
1042 static unsigned long scan_swap_map(struct swap_info_struct *si, in scan_swap_map() argument
1048 n_ret = scan_swap_map_slots(si, usage, 1, &entry); in scan_swap_map()
1060 struct swap_info_struct *si, *next; in get_swap_pages() local
1082 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1084 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in get_swap_pages()
1086 spin_lock(&si->lock); in get_swap_pages()
1087 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { in get_swap_pages()
1089 if (plist_node_empty(&si->avail_lists[node])) { in get_swap_pages()
1090 spin_unlock(&si->lock); in get_swap_pages()
1093 WARN(!si->highest_bit, in get_swap_pages()
1095 si->type); in get_swap_pages()
1096 WARN(!(si->flags & SWP_WRITEOK), in get_swap_pages()
1098 si->type); in get_swap_pages()
1099 __del_from_avail_list(si); in get_swap_pages()
1100 spin_unlock(&si->lock); in get_swap_pages()
1104 if (si->flags & SWP_BLKDEV) in get_swap_pages()
1105 n_ret = swap_alloc_cluster(si, swp_entries); in get_swap_pages()
1107 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, in get_swap_pages()
1109 spin_unlock(&si->lock); in get_swap_pages()
1113 si->type); in get_swap_pages()
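get_swap_pages() above walks the per-node avail plist in priority order and plist_requeue()s the device it allocated from, so devices of equal priority end up being used round-robin. A small model of just that rotate-among-equal-priority behaviour (a plain array instead of a plist; names are hypothetical):

    #include <stdio.h>

    /* Swap devices sorted by priority, highest first.  After a successful
     * allocation the chosen device is moved behind its equal-priority
     * peers, similar to plist_requeue(). */
    struct dev { int prio; int id; };

    static void requeue_among_equal(struct dev *devs, int n, int chosen)
    {
        struct dev tmp = devs[chosen];
        int i = chosen;

        while (i + 1 < n && devs[i + 1].prio == tmp.prio) {
            devs[i] = devs[i + 1];
            i++;
        }
        devs[i] = tmp;
    }

    int main(void)
    {
        struct dev devs[] = { { 2, 0 }, { 2, 1 }, { 1, 2 } };
        int round;

        for (round = 0; round < 3; round++) {
            printf("allocate from device %d (prio %d)\n",
                   devs[0].id, devs[0].prio);
            requeue_among_equal(devs, 3, 0);
        }
        return 0;
    }

With two equal-priority devices the output alternates between them, and the lower-priority device is never picked while the higher-priority ones can satisfy the allocation.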
1145 struct swap_info_struct *si = swap_type_to_swap_info(type); in get_swap_page_of_type() local
1149 if (!si) in get_swap_page_of_type()
1152 spin_lock(&si->lock); in get_swap_page_of_type()
1153 if (si->flags & SWP_WRITEOK) { in get_swap_page_of_type()
1155 offset = scan_swap_map(si, 1); in get_swap_page_of_type()
1157 trace_android_vh_account_swap_pages(si, &skip); in get_swap_page_of_type()
1160 spin_unlock(&si->lock); in get_swap_page_of_type()
1164 spin_unlock(&si->lock); in get_swap_page_of_type()
1318 struct swap_info_struct *si; in get_swap_device() local
1323 si = swp_swap_info(entry); in get_swap_device()
1324 if (!si) in get_swap_device()
1328 if (data_race(!(si->flags & SWP_VALID))) in get_swap_device()
1331 if (offset >= si->max) in get_swap_device()
1334 return si; in get_swap_device()
1398 struct swap_info_struct *si; in put_swap_page() local
1404 si = _swap_info_get(entry); in put_swap_page()
1405 if (!si) in put_swap_page()
1408 ci = lock_cluster_or_swap_info(si, offset); in put_swap_page()
1411 map = si->swap_map + offset; in put_swap_page()
1420 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1421 spin_lock(&si->lock); in put_swap_page()
1423 swap_free_cluster(si, idx); in put_swap_page()
1424 spin_unlock(&si->lock); in put_swap_page()
1429 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { in put_swap_page()
1430 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1434 lock_cluster_or_swap_info(si, offset); in put_swap_page()
1437 unlock_cluster_or_swap_info(si, ci); in put_swap_page()
1443 struct swap_info_struct *si; in split_swap_cluster() local
1447 si = _swap_info_get(entry); in split_swap_cluster()
1448 if (!si) in split_swap_cluster()
1450 ci = lock_cluster(si, offset); in split_swap_cluster()
1519 struct swap_info_struct *si; in __swap_count() local
1523 si = get_swap_device(entry); in __swap_count()
1524 if (si) { in __swap_count()
1525 count = swap_count(si->swap_map[offset]); in __swap_count()
1526 put_swap_device(si); in __swap_count()
1531 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) in swap_swapcount() argument
1537 ci = lock_cluster_or_swap_info(si, offset); in swap_swapcount()
1538 count = swap_count(si->swap_map[offset]); in swap_swapcount()
1539 unlock_cluster_or_swap_info(si, ci); in swap_swapcount()
1551 struct swap_info_struct *si; in __swp_swapcount() local
1553 si = get_swap_device(entry); in __swp_swapcount()
1554 if (si) { in __swp_swapcount()
1555 count = swap_swapcount(si, entry); in __swp_swapcount()
1556 put_swap_device(si); in __swp_swapcount()
1607 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1611 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1617 ci = lock_cluster_or_swap_info(si, offset); in swap_page_trans_huge_swapped()
1630 unlock_cluster_or_swap_info(si, ci); in swap_page_trans_huge_swapped()
1637 struct swap_info_struct *si; in page_swapped() local
1644 si = _swap_info_get(entry); in page_swapped()
1645 if (si) in page_swapped()
1646 return swap_page_trans_huge_swapped(si, entry); in page_swapped()
1655 struct swap_info_struct *si; in page_trans_huge_map_swapcount() local
1679 si = _swap_info_get(entry); in page_trans_huge_map_swapcount()
1680 if (si) { in page_trans_huge_map_swapcount()
1681 map = si->swap_map; in page_trans_huge_map_swapcount()
1686 ci = lock_cluster(si, offset); in page_trans_huge_map_swapcount()
1881 struct swap_info_struct *si = swap_type_to_swap_info(type); in swapdev_block() local
1883 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
1973 struct swap_info_struct *si; in unuse_pte_range() local
1978 si = swap_info[type]; in unuse_pte_range()
1989 if (frontswap && !frontswap_test(si, offset)) in unuse_pte_range()
1993 swap_map = &si->swap_map[offset]; in unuse_pte_range()
2021 trace_android_vh_unuse_swap_page(si, page); in unuse_pte_range()
2154 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2166 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2167 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2169 if (!frontswap || frontswap_test(si, i)) in find_next_to_unuse()
2175 if (i == si->max) in find_next_to_unuse()
2192 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2197 if (!READ_ONCE(si->inuse_pages)) in try_to_unuse()
2213 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2242 while (READ_ONCE(si->inuse_pages) && in try_to_unuse()
2244 (i = find_next_to_unuse(si, i, frontswap)) != 0) { in try_to_unuse()
2260 trace_android_vh_unuse_swap_page(si, page); in try_to_unuse()
2285 if (READ_ONCE(si->inuse_pages)) { in try_to_unuse()
2635 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2638 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2639 si->prio++; in SYSCALL_DEFINE1()
2640 si->list.prio--; in SYSCALL_DEFINE1()
2642 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2643 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
2785 struct swap_info_struct *si; in swap_start() local
2794 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { in swap_start()
2795 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
2798 return si; in swap_start()
2806 struct swap_info_struct *si = v; in swap_next() local
2812 type = si->type + 1; in swap_next()
2815 for (; (si = swap_type_to_swap_info(type)); type++) { in swap_next()
2816 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
2818 return si; in swap_next()
2831 struct swap_info_struct *si = v; in swap_show() local
2836 if (si == SEQ_START_TOKEN) { in swap_show()
2841 bytes = si->pages << (PAGE_SHIFT - 10); in swap_show()
2842 inuse = si->inuse_pages << (PAGE_SHIFT - 10); in swap_show()
2844 file = si->swap_file; in swap_show()
2852 si->prio); in swap_show()
3180 static bool swap_discardable(struct swap_info_struct *si) in swap_discardable() argument
3182 struct request_queue *q = bdev_get_queue(si->bdev); in swap_discardable()
3461 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3464 trace_android_vh_si_swapinfo(si, &skip); in si_swapinfo()
3465 if (!skip && (si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3466 nr_to_be_unused += si->inuse_pages; in si_swapinfo()
3632 struct swap_info_struct *si; in add_swap_count_continuation() local
3647 si = get_swap_device(entry); in add_swap_count_continuation()
3648 if (!si) { in add_swap_count_continuation()
3655 spin_lock(&si->lock); in add_swap_count_continuation()
3659 ci = lock_cluster(si, offset); in add_swap_count_continuation()
3661 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; in add_swap_count_continuation()
3682 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3685 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3694 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3722 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3725 spin_unlock(&si->lock); in add_swap_count_continuation()
3726 put_swap_device(si); in add_swap_count_continuation()
3742 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3750 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3756 spin_lock(&si->cont_lock); in swap_count_continued()
3818 spin_unlock(&si->cont_lock); in swap_count_continued()
3826 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
3830 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
3832 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
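add_swap_count_continuation()/swap_count_continued() above implement the overflow scheme where a slot's one-byte swap_map count, once saturated, is flagged as continued and the excess is tracked in separate continuation pages. A toy model of that idea, with a saturating byte spilling into a wider per-slot counter (this is not the kernel's actual continuation-page layout):

    #include <stdio.h>
    #include <stdint.h>

    #define COUNT_MAX       0x7f  /* stand-in for SWAP_MAP_MAX           */
    #define COUNT_CONTINUED 0x80  /* stand-in for the continued flag bit */

    static uint8_t  swap_map[4];   /* one byte of count per slot         */
    static uint32_t cont_map[4];   /* "continuation" storage per slot    */

    static void duplicate(unsigned slot)
    {
        if ((swap_map[slot] & ~COUNT_CONTINUED) < COUNT_MAX) {
            swap_map[slot]++;
            return;
        }
        /* Primary byte is saturated: mark it continued, count elsewhere. */
        swap_map[slot] |= COUNT_CONTINUED;
        cont_map[slot]++;
    }

    static unsigned long total_count(unsigned slot)
    {
        return (swap_map[slot] & ~COUNT_CONTINUED) + cont_map[slot];
    }

    int main(void)
    {
        unsigned i;

        for (i = 0; i < 200; i++)
            duplicate(0);
        printf("slot 0: map byte 0x%02x, total count %lu\n",
               swap_map[0], total_count(0));
        return 0;
    }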
3847 struct swap_info_struct *si, *next; in __cgroup_throttle_swaprate() local
3864 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], in __cgroup_throttle_swaprate()
3866 if (si->bdev) { in __cgroup_throttle_swaprate()
3867 blkcg_schedule_throttle(bdev_get_queue(si->bdev), true); in __cgroup_throttle_swaprate()