Lines matching references to rgn
70 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, in ufshpb_is_valid_srgn() argument
73 return rgn->rgn_state != HPB_RGN_INACTIVE && in ufshpb_is_valid_srgn()
166 struct ufshpb_region *rgn; in ufshpb_iterate_rgn() local
173 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_iterate_rgn()
174 srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_iterate_rgn()
187 if (rgn->rgn_state != HPB_RGN_INACTIVE) { in ufshpb_iterate_rgn()
194 rgn->read_timeout = ktime_add_ms(ktime_get(), in ufshpb_iterate_rgn()
195 rgn->hpb->params.read_timeout_ms); in ufshpb_iterate_rgn()
196 rgn->read_timeout_expiries = in ufshpb_iterate_rgn()
197 rgn->hpb->params.read_timeout_expiries; in ufshpb_iterate_rgn()
205 spin_lock(&rgn->rgn_lock); in ufshpb_iterate_rgn()
207 rgn->reads -= srgn->reads; in ufshpb_iterate_rgn()
209 set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); in ufshpb_iterate_rgn()
212 rgn->reads++; in ufshpb_iterate_rgn()
216 spin_unlock(&rgn->rgn_lock); in ufshpb_iterate_rgn()
219 test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { in ufshpb_iterate_rgn()
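The ufshpb_iterate_rgn() entries above show sub-region read counters being rolled up into a per-region counter under rgn->rgn_lock, with one path subtracting a sub-region's contribution and setting RGN_FLAG_DIRTY and another path simply incrementing the region counter. The following is a minimal userspace sketch of that bookkeeping, not the driver's code: the types, the mutex standing in for the spinlock, and the set_dirty/count_read split are assumptions made for illustration.

/*
 * Minimal userspace model of the read-counter bookkeeping suggested by the
 * ufshpb_iterate_rgn() entries.  All names here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_srgn {
	unsigned long reads;
};

struct model_rgn {
	pthread_mutex_t lock;      /* stands in for rgn->rgn_lock */
	unsigned long reads;       /* aggregate of sub-region reads */
	bool dirty;                /* stands in for RGN_FLAG_DIRTY */
	struct model_srgn srgn[4];
};

/* Dirtying a sub-region: drop its contribution and flag the region. */
static void model_set_dirty(struct model_rgn *rgn, int srgn_idx)
{
	pthread_mutex_lock(&rgn->lock);
	rgn->reads -= rgn->srgn[srgn_idx].reads;
	rgn->srgn[srgn_idx].reads = 0;
	rgn->dirty = true;
	pthread_mutex_unlock(&rgn->lock);
}

/* A read bumps both the sub-region and the region counters. */
static void model_count_read(struct model_rgn *rgn, int srgn_idx)
{
	pthread_mutex_lock(&rgn->lock);
	rgn->srgn[srgn_idx].reads++;
	rgn->reads++;
	pthread_mutex_unlock(&rgn->lock);
}

int main(void)
{
	struct model_rgn rgn = { .lock = PTHREAD_MUTEX_INITIALIZER };

	model_count_read(&rgn, 0);
	model_count_read(&rgn, 0);
	model_count_read(&rgn, 1);
	model_set_dirty(&rgn, 0);
	printf("region reads=%lu dirty=%d\n", rgn.reads, rgn.dirty);
	return 0;
}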
244 struct ufshpb_region *rgn; in ufshpb_test_ppn_dirty() local
250 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_test_ppn_dirty()
251 srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_test_ppn_dirty()
258 if (!ufshpb_is_valid_srgn(rgn, srgn)) in ufshpb_test_ppn_dirty()
295 static inline bool is_rgn_dirty(struct ufshpb_region *rgn) in is_rgn_dirty() argument
297 return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); in is_rgn_dirty()
439 struct ufshpb_region *rgn; in ufshpb_prep_entry() local
454 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_prep_entry()
455 srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_prep_entry()
457 if (!ufshpb_is_valid_srgn(rgn, srgn)) in ufshpb_prep_entry()
612 struct ufshpb_region *rgn; in ufshpb_prep() local
647 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_prep()
648 srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_prep()
671 if (rgn->reads > hpb->entries_per_srgn) in ufshpb_prep()
815 struct ufshpb_region *rgn; in ufshpb_clear_dirty_bitmap() local
830 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_clear_dirty_bitmap()
831 clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); in ufshpb_clear_dirty_bitmap()
839 struct ufshpb_region *rgn; in ufshpb_update_active_info() local
842 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_active_info()
843 srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_update_active_info()
845 list_del_init(&rgn->list_inact_rgn); in ufshpb_update_active_info()
855 struct ufshpb_region *rgn; in ufshpb_update_inactive_info() local
859 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_inactive_info()
861 for_each_sub_region(rgn, srgn_idx, srgn) in ufshpb_update_inactive_info()
864 if (list_empty(&rgn->list_inact_rgn)) in ufshpb_update_inactive_info()
865 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); in ufshpb_update_inactive_info()
873 struct ufshpb_region *rgn; in ufshpb_activate_subregion() local
889 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_activate_subregion()
891 if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { in ufshpb_activate_subregion()
926 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn) in ufshpb_set_unmap_cmd() argument
929 cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID : in ufshpb_set_unmap_cmd()
931 if (rgn) in ufshpb_set_unmap_cmd()
932 put_unaligned_be16(rgn->rgn_idx, &cdb[2]); in ufshpb_set_unmap_cmd()
951 struct ufshpb_region *rgn) in ufshpb_execute_umap_req() argument
960 ufshpb_set_unmap_cmd(rq->cmd, rgn); in ufshpb_execute_umap_req()
1068 struct ufshpb_region *rgn) in ufshpb_check_srgns_issue_state() argument
1073 for_each_sub_region(rgn, srgn_idx, srgn) in ufshpb_check_srgns_issue_state()
1085 struct ufshpb_region *rgn, *next_rgn; in ufshpb_read_to_handler() local
1095 list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, in ufshpb_read_to_handler()
1097 bool timedout = ktime_after(ktime_get(), rgn->read_timeout); in ufshpb_read_to_handler()
1100 rgn->read_timeout_expiries--; in ufshpb_read_to_handler()
1101 if (is_rgn_dirty(rgn) || in ufshpb_read_to_handler()
1102 rgn->read_timeout_expiries == 0) in ufshpb_read_to_handler()
1103 list_add(&rgn->list_expired_rgn, &expired_list); in ufshpb_read_to_handler()
1105 rgn->read_timeout = ktime_add_ms(ktime_get(), in ufshpb_read_to_handler()
1112 list_for_each_entry_safe(rgn, next_rgn, &expired_list, in ufshpb_read_to_handler()
1114 list_del_init(&rgn->list_expired_rgn); in ufshpb_read_to_handler()
1116 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_read_to_handler()
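The ufshpb_read_to_handler() entries show a sweep over the LRU region list: regions whose read_timeout has passed lose one read_timeout_expiries credit, and a region that is dirty or out of credits is collected onto an expired list and handed to ufshpb_update_inactive_info(), while the rest are re-armed with a fresh deadline. Below is a simplified userspace sketch of that sweep; the array, time(NULL) clock, and the "active" flag replace the kernel's list_head/ktime machinery and are assumptions.

/*
 * Userspace sketch of the read-timeout sweep suggested by the
 * ufshpb_read_to_handler() entries.  Data layout and time source are
 * simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct model_rgn {
	int idx;
	bool active;          /* region is on the LRU list */
	bool dirty;           /* stands in for RGN_FLAG_DIRTY */
	time_t read_timeout;  /* absolute deadline */
	int timeout_expiries; /* remaining grace periods */
};

/* One sweep: expire dirty or exhausted regions, re-arm the rest. */
static void read_timeout_sweep(struct model_rgn *rgn, int nr, int timeout_sec)
{
	time_t now = time(NULL);

	for (int i = 0; i < nr; i++) {
		if (!rgn[i].active || now <= rgn[i].read_timeout)
			continue;

		rgn[i].timeout_expiries--;
		if (rgn[i].dirty || rgn[i].timeout_expiries == 0) {
			/* corresponds to queueing the region for inactivation */
			rgn[i].active = false;
			printf("region %d queued for inactivation\n", rgn[i].idx);
		} else {
			rgn[i].read_timeout = now + timeout_sec;
		}
	}
}

int main(void)
{
	struct model_rgn rgns[2] = {
		{ .idx = 0, .active = true, .dirty = true,
		  .read_timeout = 0, .timeout_expiries = 3 },
		{ .idx = 1, .active = true, .dirty = false,
		  .read_timeout = 0, .timeout_expiries = 1 },
	};

	read_timeout_sweep(rgns, 2, 30);
	return 0;
}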
1130 struct ufshpb_region *rgn) in ufshpb_add_lru_info() argument
1132 rgn->rgn_state = HPB_RGN_ACTIVE; in ufshpb_add_lru_info()
1133 list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); in ufshpb_add_lru_info()
1135 if (rgn->hpb->is_hcm) { in ufshpb_add_lru_info()
1136 rgn->read_timeout = in ufshpb_add_lru_info()
1138 rgn->hpb->params.read_timeout_ms); in ufshpb_add_lru_info()
1139 rgn->read_timeout_expiries = in ufshpb_add_lru_info()
1140 rgn->hpb->params.read_timeout_expiries; in ufshpb_add_lru_info()
1145 struct ufshpb_region *rgn) in ufshpb_hit_lru_info() argument
1147 list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); in ufshpb_hit_lru_info()
1153 struct ufshpb_region *rgn, *victim_rgn = NULL; in ufshpb_victim_lru_info() local
1155 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { in ufshpb_victim_lru_info()
1156 if (!rgn) { in ufshpb_victim_lru_info()
1162 if (ufshpb_check_srgns_issue_state(hpb, rgn)) in ufshpb_victim_lru_info()
1170 rgn->reads > hpb->params.eviction_thld_exit) in ufshpb_victim_lru_info()
1173 victim_rgn = rgn; in ufshpb_victim_lru_info()
1181 struct ufshpb_region *rgn) in ufshpb_cleanup_lru_info() argument
1183 list_del_init(&rgn->list_lru_rgn); in ufshpb_cleanup_lru_info()
1184 rgn->rgn_state = HPB_RGN_INACTIVE; in ufshpb_cleanup_lru_info()
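The ufshpb_victim_lru_info() and ufshpb_cleanup_lru_info() entries suggest victim selection that walks the LRU list from its least-recently-used end, skipping regions whose sub-regions still have work in flight or whose read count exceeds eviction_thld_exit, and then drops the chosen region from the list and marks it inactive. The sketch below models that walk in plain C; the array layout, the issuing flag, and the threshold value are hypothetical simplifications.

/*
 * Sketch of LRU victim selection suggested by the ufshpb_victim_lru_info()
 * and ufshpb_cleanup_lru_info() entries.  lru[0] is the least recently used.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct model_rgn {
	int idx;
	bool issuing;        /* a sub-region map request is in flight */
	unsigned long reads; /* aggregate read counter */
	bool active;
};

static struct model_rgn *pick_victim(struct model_rgn *lru, int nr,
				     unsigned long eviction_thld_exit)
{
	for (int i = 0; i < nr; i++) {
		if (lru[i].issuing)
			continue;
		if (lru[i].reads > eviction_thld_exit)
			continue;
		return &lru[i];
	}
	return NULL;
}

int main(void)
{
	struct model_rgn lru[3] = {
		{ .idx = 7, .issuing = true,  .reads = 2,  .active = true },
		{ .idx = 3, .issuing = false, .reads = 90, .active = true },
		{ .idx = 5, .issuing = false, .reads = 4,  .active = true },
	};
	struct model_rgn *victim = pick_victim(lru, 3, 10);

	if (victim) {
		victim->active = false; /* cleanup: drop from LRU, mark inactive */
		printf("evicting region %d\n", victim->idx);
	}
	return 0;
}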
1199 struct ufshpb_region *rgn, in ufshpb_issue_umap_req() argument
1203 int rgn_idx = rgn ? rgn->rgn_idx : 0; in ufshpb_issue_umap_req()
1209 ufshpb_execute_umap_req(hpb, umap_req, rgn); in ufshpb_issue_umap_req()
1215 struct ufshpb_region *rgn) in ufshpb_issue_umap_single_req() argument
1217 return ufshpb_issue_umap_req(hpb, rgn, true); in ufshpb_issue_umap_single_req()
1226 struct ufshpb_region *rgn) in __ufshpb_evict_region() argument
1234 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); in __ufshpb_evict_region()
1236 ufshpb_cleanup_lru_info(lru_info, rgn); in __ufshpb_evict_region()
1238 for_each_sub_region(rgn, srgn_idx, srgn) in __ufshpb_evict_region()
1242 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_evict_region() argument
1248 if (rgn->rgn_state == HPB_RGN_PINNED) { in ufshpb_evict_region()
1251 rgn->rgn_idx); in ufshpb_evict_region()
1254 if (!list_empty(&rgn->list_lru_rgn)) { in ufshpb_evict_region()
1255 if (ufshpb_check_srgns_issue_state(hpb, rgn)) { in ufshpb_evict_region()
1262 ret = ufshpb_issue_umap_single_req(hpb, rgn); in ufshpb_evict_region()
1268 __ufshpb_evict_region(hpb, rgn); in ufshpb_evict_region()
1276 struct ufshpb_region *rgn, in ufshpb_issue_map_req() argument
1294 if ((rgn->rgn_state == HPB_RGN_INACTIVE) && in ufshpb_issue_map_req()
1322 rgn->rgn_idx, srgn->srgn_idx); in ufshpb_issue_map_req()
1352 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_add_region() argument
1365 if (!list_empty(&rgn->list_lru_rgn)) { in ufshpb_add_region()
1366 ufshpb_hit_lru_info(lru_info, rgn); in ufshpb_add_region()
1370 if (rgn->rgn_state == HPB_RGN_INACTIVE) { in ufshpb_add_region()
1386 rgn->reads < hpb->params.eviction_thld_enter) { in ufshpb_add_region()
1425 ufshpb_add_lru_info(lru_info, rgn); in ufshpb_add_region()
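Taken together, the ufshpb_add_lru_info(), ufshpb_hit_lru_info() and ufshpb_add_region() entries point at a familiar activation pattern: a region already on the LRU list is just moved to the most-recently-used end, while an inactive region is appended, marked active and, when is_hcm is set, armed with a read timeout and an expiries budget. The sketch below models that path with a fixed-size array; the names, the timeout values and the bounds guard are illustrative assumptions, not the driver's definitions.

/*
 * Sketch of the activation path suggested by the ufshpb_add_lru_info(),
 * ufshpb_hit_lru_info() and ufshpb_add_region() entries.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MAX_LRU 8

struct model_rgn {
	int idx;
	bool active;
	time_t read_timeout;
	int timeout_expiries;
};

struct model_lru {
	struct model_rgn *entry[MAX_LRU]; /* entry[nr-1] is most recently used */
	int nr;
};

static void lru_hit(struct model_lru *lru, int pos)
{
	struct model_rgn *rgn = lru->entry[pos];

	/* move to the MRU end, like list_move_tail() */
	for (int i = pos; i < lru->nr - 1; i++)
		lru->entry[i] = lru->entry[i + 1];
	lru->entry[lru->nr - 1] = rgn;
}

static void lru_add(struct model_lru *lru, struct model_rgn *rgn,
		    bool is_hcm, int timeout_sec, int expiries)
{
	if (lru->nr == MAX_LRU)
		return; /* no eviction modeled in this sketch */
	rgn->active = true;
	lru->entry[lru->nr++] = rgn;
	if (is_hcm) {
		rgn->read_timeout = time(NULL) + timeout_sec;
		rgn->timeout_expiries = expiries;
	}
}

static void add_region(struct model_lru *lru, struct model_rgn *rgn, bool is_hcm)
{
	for (int i = 0; i < lru->nr; i++) {
		if (lru->entry[i] == rgn) {
			lru_hit(lru, i); /* already active: just touch it */
			return;
		}
	}
	lru_add(lru, rgn, is_hcm, 30, 3);
}

int main(void)
{
	struct model_lru lru = { .nr = 0 };
	struct model_rgn a = { .idx = 1 }, b = { .idx = 2 };

	add_region(&lru, &a, true);
	add_region(&lru, &b, true);
	add_region(&lru, &a, true); /* hit: a moves to the MRU end */
	printf("MRU region: %d\n", lru.entry[lru.nr - 1]->idx);
	return 0;
}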
1435 struct ufshpb_region *rgn; in ufshpb_rsp_req_region_update() local
1452 rgn = hpb->rgn_tbl + rgn_i; in ufshpb_rsp_req_region_update()
1454 (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { in ufshpb_rsp_req_region_update()
1471 srgn = rgn->srgn_tbl + srgn_i; in ufshpb_rsp_req_region_update()
1497 rgn = hpb->rgn_tbl + rgn_i; in ufshpb_rsp_req_region_update()
1500 if (rgn->rgn_state != HPB_RGN_INACTIVE) { in ufshpb_rsp_req_region_update()
1501 for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) { in ufshpb_rsp_req_region_update()
1502 srgn = rgn->srgn_tbl + srgn_i; in ufshpb_rsp_req_region_update()
1521 struct ufshpb_region *rgn; in ufshpb_dev_reset_handler() local
1526 list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) in ufshpb_dev_reset_handler()
1527 set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); in ufshpb_dev_reset_handler()
1629 struct ufshpb_region *rgn, in ufshpb_add_active_list() argument
1632 if (!list_empty(&rgn->list_inact_rgn)) in ufshpb_add_active_list()
1644 struct ufshpb_region *rgn, in ufshpb_add_pending_evict_list() argument
1650 if (!list_empty(&rgn->list_inact_rgn)) in ufshpb_add_pending_evict_list()
1653 for_each_sub_region(rgn, srgn_idx, srgn) in ufshpb_add_pending_evict_list()
1657 list_add_tail(&rgn->list_inact_rgn, pending_list); in ufshpb_add_pending_evict_list()
1662 struct ufshpb_region *rgn; in ufshpb_run_active_subregion_list() local
1677 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_run_active_subregion_list()
1678 ret = ufshpb_add_region(hpb, rgn); in ufshpb_run_active_subregion_list()
1682 ret = ufshpb_issue_map_req(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1686 ret, rgn->rgn_idx, srgn->srgn_idx); in ufshpb_run_active_subregion_list()
1696 rgn->rgn_idx, srgn->srgn_idx); in ufshpb_run_active_subregion_list()
1698 ufshpb_add_active_list(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1704 struct ufshpb_region *rgn; in ufshpb_run_inactive_region_list() local
1710 while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, in ufshpb_run_inactive_region_list()
1716 list_del_init(&rgn->list_inact_rgn); in ufshpb_run_inactive_region_list()
1719 ret = ufshpb_evict_region(hpb, rgn); in ufshpb_run_inactive_region_list()
1722 ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); in ufshpb_run_inactive_region_list()
1741 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_normalization_work_handler() local
1744 spin_lock(&rgn->rgn_lock); in ufshpb_normalization_work_handler()
1745 rgn->reads = 0; in ufshpb_normalization_work_handler()
1747 struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; in ufshpb_normalization_work_handler()
1750 rgn->reads += srgn->reads; in ufshpb_normalization_work_handler()
1752 spin_unlock(&rgn->rgn_lock); in ufshpb_normalization_work_handler()
1754 if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads) in ufshpb_normalization_work_handler()
1759 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_normalization_work_handler()
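The ufshpb_normalization_work_handler() entries show the region counter being rebuilt under rgn->rgn_lock as the sum of its sub-region counters, with active regions that end up at zero reads handed to ufshpb_update_inactive_info(). Here is a compact sketch of that pass; the locking is omitted and the "queue for inactivation" step is reduced to clearing a flag, both simplifications of my own.

/*
 * Sketch of the normalization pass suggested by the
 * ufshpb_normalization_work_handler() entries.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_srgn { unsigned long reads; };

struct model_rgn {
	bool active;
	unsigned long reads;
	struct model_srgn srgn[4];
	int srgn_cnt;
};

static void normalize_region(struct model_rgn *rgn)
{
	/* rebuild the region counter from its sub-regions */
	rgn->reads = 0;
	for (int i = 0; i < rgn->srgn_cnt; i++)
		rgn->reads += rgn->srgn[i].reads;

	if (!rgn->active || rgn->reads)
		return;

	/* active region with no recent reads: queue it for inactivation */
	rgn->active = false;
	printf("region deactivated after normalization\n");
}

int main(void)
{
	struct model_rgn rgn = { .active = true, .srgn_cnt = 4 };

	normalize_region(&rgn); /* all sub-region counters are zero */
	return 0;
}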
1784 struct ufshpb_region *rgn) in ufshpb_init_pinned_active_region() argument
1790 for_each_sub_region(rgn, srgn_idx, srgn) { in ufshpb_init_pinned_active_region()
1803 rgn->rgn_state = HPB_RGN_PINNED; in ufshpb_init_pinned_active_region()
1808 srgn = rgn->srgn_tbl + i; in ufshpb_init_pinned_active_region()
1815 struct ufshpb_region *rgn, bool last) in ufshpb_init_subregion_tbl() argument
1820 for_each_sub_region(rgn, srgn_idx, srgn) { in ufshpb_init_subregion_tbl()
1823 srgn->rgn_idx = rgn->rgn_idx; in ufshpb_init_subregion_tbl()
1833 struct ufshpb_region *rgn, int srgn_cnt) in ufshpb_alloc_subregion_tbl() argument
1835 rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), in ufshpb_alloc_subregion_tbl()
1837 if (!rgn->srgn_tbl) in ufshpb_alloc_subregion_tbl()
1840 rgn->srgn_cnt = srgn_cnt; in ufshpb_alloc_subregion_tbl()
1905 struct ufshpb_region *rgn_table, *rgn; in ufshpb_alloc_region_tbl() local
1920 rgn = rgn_table + rgn_idx; in ufshpb_alloc_region_tbl()
1921 rgn->rgn_idx = rgn_idx; in ufshpb_alloc_region_tbl()
1923 spin_lock_init(&rgn->rgn_lock); in ufshpb_alloc_region_tbl()
1925 INIT_LIST_HEAD(&rgn->list_inact_rgn); in ufshpb_alloc_region_tbl()
1926 INIT_LIST_HEAD(&rgn->list_lru_rgn); in ufshpb_alloc_region_tbl()
1927 INIT_LIST_HEAD(&rgn->list_expired_rgn); in ufshpb_alloc_region_tbl()
1935 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); in ufshpb_alloc_region_tbl()
1938 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); in ufshpb_alloc_region_tbl()
1941 ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); in ufshpb_alloc_region_tbl()
1945 rgn->rgn_state = HPB_RGN_INACTIVE; in ufshpb_alloc_region_tbl()
1948 rgn->rgn_flags = 0; in ufshpb_alloc_region_tbl()
1949 rgn->hpb = hpb; in ufshpb_alloc_region_tbl()
1956 rgn = rgn_table + i; in ufshpb_alloc_region_tbl()
1957 kvfree(rgn->srgn_tbl); in ufshpb_alloc_region_tbl()
1964 struct ufshpb_region *rgn) in ufshpb_destroy_subregion_tbl() argument
1969 for_each_sub_region(rgn, srgn_idx, srgn) in ufshpb_destroy_subregion_tbl()
1981 struct ufshpb_region *rgn; in ufshpb_destroy_region_tbl() local
1983 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_destroy_region_tbl()
1984 if (rgn->rgn_state != HPB_RGN_INACTIVE) { in ufshpb_destroy_region_tbl()
1985 rgn->rgn_state = HPB_RGN_INACTIVE; in ufshpb_destroy_region_tbl()
1987 ufshpb_destroy_subregion_tbl(hpb, rgn); in ufshpb_destroy_region_tbl()
1990 kvfree(rgn->srgn_tbl); in ufshpb_destroy_region_tbl()
2508 struct ufshpb_region *rgn, *next_rgn; in ufshpb_discard_rsp_lists() local
2518 list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, in ufshpb_discard_rsp_lists()
2520 list_del_init(&rgn->list_inact_rgn); in ufshpb_discard_rsp_lists()