Lines matching references to hpb in the UFS HPB (Host Performance Booster) driver, ufshpb.c:

36 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
60 static int ufshpb_get_state(struct ufshpb_lu *hpb) in ufshpb_get_state() argument
62 return atomic_read(&hpb->hpb_state); in ufshpb_get_state()
65 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) in ufshpb_set_state() argument
67 atomic_set(&hpb->hpb_state, state); in ufshpb_set_state()
88 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) in ufshpb_is_supported_chunk() argument
90 return transfer_len <= hpb->pre_req_max_tr_len; in ufshpb_is_supported_chunk()
97 static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len) in ufshpb_is_required_wb() argument
99 return len > hpb->pre_req_min_tr_len && in ufshpb_is_required_wb()
100 len <= hpb->pre_req_max_tr_len; in ufshpb_is_required_wb()
108 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) in ufshpb_is_pinned_region() argument
110 if (hpb->lu_pinned_end != PINNED_NOT_SET && in ufshpb_is_pinned_region()
111 rgn_idx >= hpb->lu_pinned_start && in ufshpb_is_pinned_region()
112 rgn_idx <= hpb->lu_pinned_end) in ufshpb_is_pinned_region()
118 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) in ufshpb_kick_map_work() argument
123 if (ufshpb_get_state(hpb) != HPB_PRESENT) in ufshpb_kick_map_work()
126 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_kick_map_work()
127 if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) in ufshpb_kick_map_work()
129 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_kick_map_work()
132 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_kick_map_work()
163 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, in ufshpb_iterate_rgn() argument
173 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_iterate_rgn()
177 bitmap_len = hpb->entries_per_srgn; in ufshpb_iterate_rgn()
179 bitmap_len = hpb->last_srgn_entries; in ufshpb_iterate_rgn()
186 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_iterate_rgn()
192 } else if (hpb->is_hcm) { in ufshpb_iterate_rgn()
195 rgn->hpb->params.read_timeout_ms); in ufshpb_iterate_rgn()
197 rgn->hpb->params.read_timeout_expiries; in ufshpb_iterate_rgn()
200 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_iterate_rgn()
202 if (hpb->is_hcm && prev_srgn != srgn) { in ufshpb_iterate_rgn()
213 if (srgn->reads == hpb->params.activation_thld) in ufshpb_iterate_rgn()
220 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_iterate_rgn()
221 ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); in ufshpb_iterate_rgn()
222 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_iterate_rgn()
223 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_iterate_rgn()
231 if (++srgn_idx == hpb->srgns_per_rgn) { in ufshpb_iterate_rgn()
241 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, in ufshpb_test_ppn_dirty() argument
250 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_test_ppn_dirty()
254 bitmap_len = hpb->entries_per_srgn; in ufshpb_test_ppn_dirty()
256 bitmap_len = hpb->last_srgn_entries; in ufshpb_test_ppn_dirty()
267 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_test_ppn_dirty()
283 if (++srgn_idx == hpb->srgns_per_rgn) { in ufshpb_test_ppn_dirty()
300 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, in ufshpb_fill_ppn_from_page() argument
318 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_fill_ppn_from_page()
330 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, in ufshpb_get_pos_from_lpn() argument
335 *rgn_idx = lpn >> hpb->entries_per_rgn_shift; in ufshpb_get_pos_from_lpn()
336 rgn_offset = lpn & hpb->entries_per_rgn_mask; in ufshpb_get_pos_from_lpn()
337 *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; in ufshpb_get_pos_from_lpn()
338 *offset = rgn_offset & hpb->entries_per_srgn_mask; in ufshpb_get_pos_from_lpn()
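
The ufshpb_get_pos_from_lpn() references above show how a logical page number is decomposed into a region index, sub-region index, and in-sub-region offset using only the precomputed shift and mask fields. A minimal standalone sketch of that arithmetic, assuming the field names from the listing (an illustration, not the kernel code):

    /* Sketch only: decompose an LPN with the shift/mask geometry fields
     * referenced in ufshpb_get_pos_from_lpn() above. */
    struct hpb_geom {
            unsigned int  entries_per_rgn_shift;
            unsigned long entries_per_rgn_mask;
            unsigned int  entries_per_srgn_shift;
            unsigned long entries_per_srgn_mask;
    };

    static void lpn_to_pos(const struct hpb_geom *g, unsigned long lpn,
                           int *rgn_idx, int *srgn_idx, int *offset)
    {
            unsigned long rgn_offset;

            *rgn_idx   = lpn >> g->entries_per_rgn_shift;           /* which region */
            rgn_offset = lpn & g->entries_per_rgn_mask;             /* LPN within the region */
            *srgn_idx  = rgn_offset >> g->entries_per_srgn_shift;   /* which sub-region */
            *offset    = rgn_offset & g->entries_per_srgn_mask;     /* entry within the sub-region */
    }

Because the per-region and per-sub-region entry counts are powers of two (see the ilog2() calls in the ufshpb_lu_parameter_init() references further down), the shift/mask pair is equivalent to a divide/modulo.
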
342 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb, in ufshpb_set_hpb_read_to_upiu() argument
375 static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb) in ufshpb_get_pre_req() argument
379 if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) { in ufshpb_get_pre_req()
380 dev_info(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_get_pre_req()
382 hpb->num_inflight_pre_req, hpb->throttle_pre_req); in ufshpb_get_pre_req()
386 pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free, in ufshpb_get_pre_req()
389 dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req"); in ufshpb_get_pre_req()
394 hpb->num_inflight_pre_req++; in ufshpb_get_pre_req()
399 static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb, in ufshpb_put_pre_req() argument
404 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); in ufshpb_put_pre_req()
405 hpb->num_inflight_pre_req--; in ufshpb_put_pre_req()
411 struct ufshpb_lu *hpb = pre_req->hpb; in ufshpb_pre_req_compl_fn() local
418 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error); in ufshpb_pre_req_compl_fn()
420 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_pre_req_compl_fn()
424 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_pre_req_compl_fn()
431 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_pre_req_compl_fn()
432 ufshpb_put_pre_req(pre_req->hpb, pre_req); in ufshpb_pre_req_compl_fn()
433 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_pre_req_compl_fn()
438 struct ufshpb_lu *hpb = pre_req->hpb; in ufshpb_prep_entry() local
449 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); in ufshpb_prep_entry()
451 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_prep_entry()
454 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_prep_entry()
463 copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, in ufshpb_prep_entry()
473 if (srgn_offset == hpb->entries_per_srgn) { in ufshpb_prep_entry()
476 if (++srgn_idx == hpb->srgns_per_rgn) { in ufshpb_prep_entry()
485 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep_entry()
488 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep_entry()
492 static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb, in ufshpb_pre_req_add_bio_page() argument
510 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_pre_req_add_bio_page()
517 static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb) in ufshpb_get_read_id() argument
519 if (++hpb->cur_read_id >= MAX_HPB_READ_ID) in ufshpb_get_read_id()
520 hpb->cur_read_id = 1; in ufshpb_get_read_id()
521 return hpb->cur_read_id; in ufshpb_get_read_id()
524 static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, in ufshpb_execute_pre_req() argument
533 pre_req->hpb = hpb; in ufshpb_execute_pre_req()
538 if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req)) in ufshpb_execute_pre_req()
560 hpb->stats.pre_req_cnt++; in ufshpb_execute_pre_req()
565 static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, in ufshpb_issue_pre_req() argument
579 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_pre_req()
580 pre_req = ufshpb_get_pre_req(hpb); in ufshpb_issue_pre_req()
585 _read_id = ufshpb_get_read_id(hpb); in ufshpb_issue_pre_req()
586 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_pre_req()
590 ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id); in ufshpb_issue_pre_req()
598 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_pre_req()
599 ufshpb_put_pre_req(hpb, pre_req); in ufshpb_issue_pre_req()
601 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_pre_req()
611 struct ufshpb_lu *hpb; in ufshpb_prep() local
622 hpb = ufshpb_get_hpb_data(cmd->device); in ufshpb_prep()
623 if (!hpb) in ufshpb_prep()
626 if (ufshpb_get_state(hpb) == HPB_INIT) in ufshpb_prep()
629 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_prep()
630 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_prep()
646 ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); in ufshpb_prep()
647 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_prep()
652 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
657 if (!ufshpb_is_supported_chunk(hpb, transfer_len)) in ufshpb_prep()
662 if (hpb->is_hcm) { in ufshpb_prep()
667 ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
671 if (rgn->reads > hpb->entries_per_srgn) in ufshpb_prep()
672 schedule_work(&hpb->ufshpb_normalization_work); in ufshpb_prep()
675 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_prep()
676 if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, in ufshpb_prep()
678 hpb->stats.miss_cnt++; in ufshpb_prep()
679 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep()
683 err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn); in ufshpb_prep()
684 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_prep()
697 ufshpb_is_required_wb(hpb, transfer_len)) { in ufshpb_prep()
698 err = ufshpb_issue_pre_req(hpb, cmd, &read_id); in ufshpb_prep()
703 hpb->params.requeue_timeout_ms); in ufshpb_prep()
708 hpb->stats.miss_cnt++; in ufshpb_prep()
713 ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len, in ufshpb_prep()
716 hpb->stats.hit_cnt++; in ufshpb_prep()
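
Taken together, the ufshpb_prep() references above trace the hit/miss decision for an incoming read: missing HPB data or a non-PRESENT state falls back to a normal read, an unsupported transfer length falls back, a dirty cached entry counts as a miss, and only a clean entry is patched into the UPIU as an HPB READ, optionally preceded by a WRITE BUFFER pre-request for mid-size chunks. A simplified, self-contained sketch of that ordering, with stand-in types that are assumptions rather than the driver's structures:

    /* Sketch only: the decision order traced by the ufshpb_prep() lines above. */
    enum prep_result { PREP_NORMAL_READ, PREP_HPB_READ, PREP_HPB_READ_WITH_PRE_REQ };

    struct fake_lu {
            int present;          /* stands in for ufshpb_get_state() == HPB_PRESENT */
            int max_chunk;        /* stands in for pre_req_max_tr_len */
            int min_wb_chunk;     /* stands in for pre_req_min_tr_len */
            int entry_dirty;      /* stands in for ufshpb_test_ppn_dirty() */
    };

    static enum prep_result prep_read(const struct fake_lu *lu, int transfer_len)
    {
            if (!lu || !lu->present)
                    return PREP_NORMAL_READ;             /* no HPB: plain READ */
            if (transfer_len > lu->max_chunk)
                    return PREP_NORMAL_READ;             /* chunk not supported by HPB READ */
            if (lu->entry_dirty)
                    return PREP_NORMAL_READ;             /* stale L2P entry: counted as a miss */
            if (transfer_len > lu->min_wb_chunk)
                    return PREP_HPB_READ_WITH_PRE_REQ;   /* WRITE BUFFER pre-request first */
            return PREP_HPB_READ;                        /* patch cached PPN into the UPIU: a hit */
    }

The HCM-mode read counting and normalization scheduling shown in the listing are omitted here for brevity.
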
720 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, in ufshpb_get_req() argument
728 rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); in ufshpb_get_req()
733 req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, in ufshpb_get_req()
744 rq->hpb = hpb; in ufshpb_get_req()
751 kmem_cache_free(hpb->map_req_cache, rq); in ufshpb_get_req()
755 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) in ufshpb_put_req() argument
758 kmem_cache_free(hpb->map_req_cache, rq); in ufshpb_put_req()
761 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, in ufshpb_get_map_req() argument
768 if (hpb->is_hcm && in ufshpb_get_map_req()
769 hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { in ufshpb_get_map_req()
770 dev_info(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_get_map_req()
772 hpb->num_inflight_map_req, in ufshpb_get_map_req()
773 hpb->params.inflight_map_req); in ufshpb_get_map_req()
777 map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false); in ufshpb_get_map_req()
781 bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); in ufshpb_get_map_req()
783 ufshpb_put_req(hpb, map_req); in ufshpb_get_map_req()
792 spin_lock_irqsave(&hpb->param_lock, flags); in ufshpb_get_map_req()
793 hpb->num_inflight_map_req++; in ufshpb_get_map_req()
794 spin_unlock_irqrestore(&hpb->param_lock, flags); in ufshpb_get_map_req()
799 static void ufshpb_put_map_req(struct ufshpb_lu *hpb, in ufshpb_put_map_req() argument
805 ufshpb_put_req(hpb, map_req); in ufshpb_put_map_req()
807 spin_lock_irqsave(&hpb->param_lock, flags); in ufshpb_put_map_req()
808 hpb->num_inflight_map_req--; in ufshpb_put_map_req()
809 spin_unlock_irqrestore(&hpb->param_lock, flags); in ufshpb_put_map_req()
812 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, in ufshpb_clear_dirty_bitmap() argument
816 u32 num_entries = hpb->entries_per_srgn; in ufshpb_clear_dirty_bitmap()
819 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_clear_dirty_bitmap()
826 num_entries = hpb->last_srgn_entries; in ufshpb_clear_dirty_bitmap()
830 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_clear_dirty_bitmap()
836 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, in ufshpb_update_active_info() argument
842 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_active_info()
848 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_update_active_info()
850 hpb->stats.rb_active_cnt++; in ufshpb_update_active_info()
853 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) in ufshpb_update_inactive_info() argument
859 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_update_inactive_info()
865 list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); in ufshpb_update_inactive_info()
867 hpb->stats.rb_inactive_cnt++; in ufshpb_update_inactive_info()
870 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, in ufshpb_activate_subregion() argument
882 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_activate_subregion()
889 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_activate_subregion()
892 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_activate_subregion()
905 ufshpb_put_req(umap_req->hpb, umap_req); in ufshpb_umap_req_compl_fn()
911 struct ufshpb_lu *hpb = map_req->hpb; in ufshpb_map_req_compl_fn() local
915 srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + in ufshpb_map_req_compl_fn()
918 ufshpb_clear_dirty_bitmap(hpb, srgn); in ufshpb_map_req_compl_fn()
919 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_map_req_compl_fn()
920 ufshpb_activate_subregion(hpb, srgn); in ufshpb_map_req_compl_fn()
921 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_map_req_compl_fn()
923 ufshpb_put_map_req(map_req->hpb, map_req); in ufshpb_map_req_compl_fn()
949 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, in ufshpb_execute_umap_req() argument
965 hpb->stats.umap_req_cnt++; in ufshpb_execute_umap_req()
968 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, in ufshpb_execute_map_req() argument
974 int mem_size = hpb->srgn_mem_size; in ufshpb_execute_map_req()
978 q = hpb->sdev_ufs_lu->request_queue; in ufshpb_execute_map_req()
979 for (i = 0; i < hpb->pages_per_srgn; i++) { in ufshpb_execute_map_req()
983 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_execute_map_req()
999 mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; in ufshpb_execute_map_req()
1007 hpb->stats.map_req_cnt++; in ufshpb_execute_map_req()
1011 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, in ufshpb_get_map_ctx() argument
1015 u32 num_entries = hpb->entries_per_srgn; in ufshpb_get_map_ctx()
1022 mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); in ufshpb_get_map_ctx()
1027 num_entries = hpb->last_srgn_entries; in ufshpb_get_map_ctx()
1033 for (i = 0; i < hpb->pages_per_srgn; i++) { in ufshpb_get_map_ctx()
1048 kmem_cache_free(hpb->m_page_cache, mctx->m_page); in ufshpb_get_map_ctx()
1054 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, in ufshpb_put_map_ctx() argument
1059 for (i = 0; i < hpb->pages_per_srgn; i++) in ufshpb_put_map_ctx()
1063 kmem_cache_free(hpb->m_page_cache, mctx->m_page); in ufshpb_put_map_ctx()
1067 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, in ufshpb_check_srgns_issue_state() argument
1082 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, in ufshpb_read_to_handler() local
1084 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_read_to_handler()
1090 if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) in ufshpb_read_to_handler()
1093 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_read_to_handler()
1106 hpb->params.read_timeout_ms); in ufshpb_read_to_handler()
1110 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_read_to_handler()
1115 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_read_to_handler()
1116 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_read_to_handler()
1117 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_read_to_handler()
1120 ufshpb_kick_map_work(hpb); in ufshpb_read_to_handler()
1122 clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); in ufshpb_read_to_handler()
1124 poll = hpb->params.timeout_polling_interval_ms; in ufshpb_read_to_handler()
1125 schedule_delayed_work(&hpb->ufshpb_read_to_work, in ufshpb_read_to_handler()
1135 if (rgn->hpb->is_hcm) { in ufshpb_add_lru_info()
1138 rgn->hpb->params.read_timeout_ms); in ufshpb_add_lru_info()
1140 rgn->hpb->params.read_timeout_expiries; in ufshpb_add_lru_info()
1150 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) in ufshpb_victim_lru_info() argument
1152 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_victim_lru_info()
1157 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_victim_lru_info()
1162 if (ufshpb_check_srgns_issue_state(hpb, rgn)) in ufshpb_victim_lru_info()
1169 if (hpb->is_hcm && in ufshpb_victim_lru_info()
1170 rgn->reads > hpb->params.eviction_thld_exit) in ufshpb_victim_lru_info()
1188 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, in ufshpb_purge_active_subregion() argument
1192 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_purge_active_subregion()
1198 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, in ufshpb_issue_umap_req() argument
1205 umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic); in ufshpb_issue_umap_req()
1209 ufshpb_execute_umap_req(hpb, umap_req, rgn); in ufshpb_issue_umap_req()
1214 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, in ufshpb_issue_umap_single_req() argument
1217 return ufshpb_issue_umap_req(hpb, rgn, true); in ufshpb_issue_umap_single_req()
1220 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb) in ufshpb_issue_umap_all_req() argument
1222 return ufshpb_issue_umap_req(hpb, NULL, false); in ufshpb_issue_umap_all_req()
1225 static void __ufshpb_evict_region(struct ufshpb_lu *hpb, in __ufshpb_evict_region() argument
1232 lru_info = &hpb->lru_info; in __ufshpb_evict_region()
1234 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); in __ufshpb_evict_region()
1239 ufshpb_purge_active_subregion(hpb, srgn); in __ufshpb_evict_region()
1242 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_evict_region() argument
1247 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
1249 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_evict_region()
1255 if (ufshpb_check_srgns_issue_state(hpb, rgn)) { in ufshpb_evict_region()
1260 if (hpb->is_hcm) { in ufshpb_evict_region()
1261 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
1262 ret = ufshpb_issue_umap_single_req(hpb, rgn); in ufshpb_evict_region()
1263 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
1268 __ufshpb_evict_region(hpb, rgn); in ufshpb_evict_region()
1271 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_evict_region()
1275 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, in ufshpb_issue_map_req() argument
1286 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1288 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_issue_map_req()
1289 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1315 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1318 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); in ufshpb_issue_map_req()
1320 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1328 map_req = ufshpb_get_map_req(hpb, srgn); in ufshpb_issue_map_req()
1333 ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); in ufshpb_issue_map_req()
1335 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_issue_map_req()
1343 ufshpb_put_map_req(hpb, map_req); in ufshpb_issue_map_req()
1345 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1348 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_issue_map_req()
1352 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) in ufshpb_add_region() argument
1355 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_add_region()
1359 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_add_region()
1385 if (hpb->is_hcm && in ufshpb_add_region()
1386 rgn->reads < hpb->params.eviction_thld_enter) { in ufshpb_add_region()
1391 victim_rgn = ufshpb_victim_lru_info(hpb); in ufshpb_add_region()
1393 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_add_region()
1395 hpb->is_hcm ? "" : "error"); in ufshpb_add_region()
1400 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_add_region()
1405 if (hpb->is_hcm) { in ufshpb_add_region()
1406 spin_unlock_irqrestore(&hpb->rgn_state_lock, in ufshpb_add_region()
1408 ret = ufshpb_issue_umap_single_req(hpb, in ufshpb_add_region()
1410 spin_lock_irqsave(&hpb->rgn_state_lock, in ufshpb_add_region()
1416 __ufshpb_evict_region(hpb, victim_rgn); in ufshpb_add_region()
1428 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_add_region()
1432 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, in ufshpb_rsp_req_region_update() argument
1452 rgn = hpb->rgn_tbl + rgn_i; in ufshpb_rsp_req_region_update()
1453 if (hpb->is_hcm && in ufshpb_rsp_req_region_update()
1464 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_req_region_update()
1467 spin_lock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1468 ufshpb_update_active_info(hpb, rgn_i, srgn_i); in ufshpb_rsp_req_region_update()
1469 spin_unlock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1474 spin_lock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1477 spin_unlock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1480 if (hpb->is_hcm) { in ufshpb_rsp_req_region_update()
1490 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_req_region_update()
1493 spin_lock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1494 ufshpb_update_inactive_info(hpb, rgn_i); in ufshpb_rsp_req_region_update()
1495 spin_unlock(&hpb->rsp_list_lock); in ufshpb_rsp_req_region_update()
1497 rgn = hpb->rgn_tbl + rgn_i; in ufshpb_rsp_req_region_update()
1499 spin_lock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1507 spin_unlock(&hpb->rgn_state_lock); in ufshpb_rsp_req_region_update()
1511 dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", in ufshpb_rsp_req_region_update()
1514 if (ufshpb_get_state(hpb) == HPB_PRESENT) in ufshpb_rsp_req_region_update()
1515 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_rsp_req_region_update()
1518 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb) in ufshpb_dev_reset_handler() argument
1520 struct victim_select_info *lru_info = &hpb->lru_info; in ufshpb_dev_reset_handler()
1524 spin_lock_irqsave(&hpb->rgn_state_lock, flags); in ufshpb_dev_reset_handler()
1529 spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); in ufshpb_dev_reset_handler()
1539 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); in ufshpb_rsp_upiu() local
1548 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_rsp_upiu()
1550 if (!hpb) in ufshpb_rsp_upiu()
1553 if (rsp_field->lun == hpb->lun) { in ufshpb_rsp_upiu()
1563 if (!hpb) in ufshpb_rsp_upiu()
1566 if (ufshpb_get_state(hpb) == HPB_INIT) in ufshpb_rsp_upiu()
1569 if ((ufshpb_get_state(hpb) != HPB_PRESENT) && in ufshpb_rsp_upiu()
1570 (ufshpb_get_state(hpb) != HPB_SUSPEND)) { in ufshpb_rsp_upiu()
1571 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1582 if (!ufshpb_is_general_lun(hpb->lun)) in ufshpb_rsp_upiu()
1585 ufshpb_kick_map_work(hpb); in ufshpb_rsp_upiu()
1594 hpb->stats.rb_noti_cnt++; in ufshpb_rsp_upiu()
1599 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1602 ufshpb_rsp_req_region_update(hpb, rsp_field); in ufshpb_rsp_upiu()
1605 dev_warn(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1608 if (hpb->is_hcm) { in ufshpb_rsp_upiu()
1621 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_rsp_upiu()
1628 static void ufshpb_add_active_list(struct ufshpb_lu *hpb, in ufshpb_add_active_list() argument
1636 list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_add_active_list()
1640 list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_add_active_list()
1643 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, in ufshpb_add_pending_evict_list() argument
1660 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) in ufshpb_run_active_subregion_list() argument
1667 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1668 while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, in ufshpb_run_active_subregion_list()
1671 if (ufshpb_get_state(hpb) == HPB_SUSPEND) in ufshpb_run_active_subregion_list()
1675 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1677 rgn = hpb->rgn_tbl + srgn->rgn_idx; in ufshpb_run_active_subregion_list()
1678 ret = ufshpb_add_region(hpb, rgn); in ufshpb_run_active_subregion_list()
1682 ret = ufshpb_issue_map_req(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1684 dev_err(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_run_active_subregion_list()
1689 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1691 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1695 dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", in ufshpb_run_active_subregion_list()
1697 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1698 ufshpb_add_active_list(hpb, rgn, srgn); in ufshpb_run_active_subregion_list()
1699 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_active_subregion_list()
1702 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) in ufshpb_run_inactive_region_list() argument
1709 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1710 while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, in ufshpb_run_inactive_region_list()
1713 if (ufshpb_get_state(hpb) == HPB_SUSPEND) in ufshpb_run_inactive_region_list()
1717 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1719 ret = ufshpb_evict_region(hpb, rgn); in ufshpb_run_inactive_region_list()
1721 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1722 ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); in ufshpb_run_inactive_region_list()
1723 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1726 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1729 list_splice(&pending_list, &hpb->lh_inact_rgn); in ufshpb_run_inactive_region_list()
1730 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_run_inactive_region_list()
1735 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, in ufshpb_normalization_work_handler() local
1738 u8 factor = hpb->params.normalization_factor; in ufshpb_normalization_work_handler()
1740 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_normalization_work_handler()
1741 struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_normalization_work_handler()
1746 for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { in ufshpb_normalization_work_handler()
1758 spin_lock(&hpb->rsp_list_lock); in ufshpb_normalization_work_handler()
1759 ufshpb_update_inactive_info(hpb, rgn->rgn_idx); in ufshpb_normalization_work_handler()
1760 spin_unlock(&hpb->rsp_list_lock); in ufshpb_normalization_work_handler()
1766 struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); in ufshpb_map_work_handler() local
1768 if (ufshpb_get_state(hpb) != HPB_PRESENT) { in ufshpb_map_work_handler()
1769 dev_notice(&hpb->sdev_ufs_lu->sdev_dev, in ufshpb_map_work_handler()
1774 ufshpb_run_inactive_region_list(hpb); in ufshpb_map_work_handler()
1775 ufshpb_run_active_subregion_list(hpb); in ufshpb_map_work_handler()
1783 struct ufshpb_lu *hpb, in ufshpb_init_pinned_active_region() argument
1791 srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); in ufshpb_init_pinned_active_region()
1800 list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); in ufshpb_init_pinned_active_region()
1809 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_init_pinned_active_region()
1814 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_init_subregion_tbl() argument
1828 if (unlikely(last && hpb->last_srgn_entries)) in ufshpb_init_subregion_tbl()
1832 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_alloc_subregion_tbl() argument
1845 struct ufshpb_lu *hpb, in ufshpb_lu_parameter_init() argument
1853 hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1; in ufshpb_lu_parameter_init()
1856 hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; in ufshpb_lu_parameter_init()
1858 hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; in ufshpb_lu_parameter_init()
1861 hpb->cur_read_id = 0; in ufshpb_lu_parameter_init()
1863 hpb->lu_pinned_start = hpb_lu_info->pinned_start; in ufshpb_lu_parameter_init()
1864 hpb->lu_pinned_end = hpb_lu_info->num_pinned ? in ufshpb_lu_parameter_init()
1867 hpb->lru_info.max_lru_active_cnt = in ufshpb_lu_parameter_init()
1873 hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) in ufshpb_lu_parameter_init()
1879 hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); in ufshpb_lu_parameter_init()
1880 hpb->entries_per_rgn_mask = entries_per_rgn - 1; in ufshpb_lu_parameter_init()
1882 hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; in ufshpb_lu_parameter_init()
1883 hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); in ufshpb_lu_parameter_init()
1884 hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; in ufshpb_lu_parameter_init()
1887 do_div(tmp, hpb->srgn_mem_size); in ufshpb_lu_parameter_init()
1888 hpb->srgns_per_rgn = (int)tmp; in ufshpb_lu_parameter_init()
1890 hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, in ufshpb_lu_parameter_init()
1892 hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, in ufshpb_lu_parameter_init()
1893 (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); in ufshpb_lu_parameter_init()
1894 hpb->last_srgn_entries = hpb_lu_info->num_blocks in ufshpb_lu_parameter_init()
1895 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); in ufshpb_lu_parameter_init()
1897 hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); in ufshpb_lu_parameter_init()
1900 hpb->is_hcm = true; in ufshpb_lu_parameter_init()
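
The ufshpb_lu_parameter_init() references above derive the LU geometry from the per-sub-region map size: entries_per_srgn = srgn_mem_size / HPB_ENTRY_SIZE, the sub-region count per LU and the tail sub-region size via DIV_ROUND_UP and modulo over num_blocks, and pages_per_srgn via DIV_ROUND_UP over PAGE_SIZE. A small standalone program working through that arithmetic; the input sizes are assumptions chosen for illustration, and the 8-byte entry size follows the driver's HPB_ENTRY_SIZE constant:

    /* Sketch only: the geometry derivation listed above, with illustrative inputs. */
    #include <stdio.h>

    #define HPB_ENTRY_SIZE 8ULL               /* bytes per cached L2P entry (assumed from the driver) */
    #define PAGE_SIZE      4096ULL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long long srgn_mem_size = 8192;             /* assumed: map bytes per sub-region */
            unsigned long long num_blocks    = (1ULL << 21) + 300; /* assumed: LU size in logical blocks */

            unsigned long long entries_per_srgn  = srgn_mem_size / HPB_ENTRY_SIZE;
            unsigned long long srgns_per_lu      = DIV_ROUND_UP(num_blocks, entries_per_srgn);
            unsigned long long last_srgn_entries = num_blocks % entries_per_srgn;
            unsigned long long pages_per_srgn    = DIV_ROUND_UP(srgn_mem_size, PAGE_SIZE);

            printf("entries_per_srgn=%llu srgns_per_lu=%llu last_srgn_entries=%llu pages_per_srgn=%llu\n",
                   entries_per_srgn, srgns_per_lu, last_srgn_entries, pages_per_srgn);
            return 0;
    }

With these assumed inputs the last sub-region covers 300 entries, which is why the listing shows the last_srgn_entries special case threaded through the bitmap-length checks above.
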
1903 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) in ufshpb_alloc_region_tbl() argument
1909 rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), in ufshpb_alloc_region_tbl()
1914 hpb->rgn_tbl = rgn_table; in ufshpb_alloc_region_tbl()
1916 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_alloc_region_tbl()
1917 int srgn_cnt = hpb->srgns_per_rgn; in ufshpb_alloc_region_tbl()
1929 if (rgn_idx == hpb->rgns_per_lu - 1) { in ufshpb_alloc_region_tbl()
1930 srgn_cnt = ((hpb->srgns_per_lu - 1) % in ufshpb_alloc_region_tbl()
1931 hpb->srgns_per_rgn) + 1; in ufshpb_alloc_region_tbl()
1935 ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); in ufshpb_alloc_region_tbl()
1938 ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); in ufshpb_alloc_region_tbl()
1940 if (ufshpb_is_pinned_region(hpb, rgn_idx)) { in ufshpb_alloc_region_tbl()
1941 ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); in ufshpb_alloc_region_tbl()
1949 rgn->hpb = hpb; in ufshpb_alloc_region_tbl()
1963 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, in ufshpb_destroy_subregion_tbl() argument
1972 ufshpb_put_map_ctx(hpb, srgn->mctx); in ufshpb_destroy_subregion_tbl()
1976 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) in ufshpb_destroy_region_tbl() argument
1980 for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { in ufshpb_destroy_region_tbl()
1983 rgn = hpb->rgn_tbl + rgn_idx; in ufshpb_destroy_region_tbl()
1987 ufshpb_destroy_subregion_tbl(hpb, rgn); in ufshpb_destroy_region_tbl()
1993 kvfree(hpb->rgn_tbl); in ufshpb_destroy_region_tbl()
2002 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
2004 if (!hpb) \
2007 return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
2042 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
2044 if (!hpb) \
2047 return sysfs_emit(buf, "%d\n", hpb->params.__name); \
2056 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in requeue_timeout_ms_store() local
2059 if (!hpb) in requeue_timeout_ms_store()
2068 hpb->params.requeue_timeout_ms = val; in requeue_timeout_ms_store()
2080 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in activation_thld_store() local
2083 if (!hpb) in activation_thld_store()
2086 if (!hpb->is_hcm) in activation_thld_store()
2095 hpb->params.activation_thld = val; in activation_thld_store()
2107 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in normalization_factor_store() local
2110 if (!hpb) in normalization_factor_store()
2113 if (!hpb->is_hcm) in normalization_factor_store()
2119 if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) in normalization_factor_store()
2122 hpb->params.normalization_factor = val; in normalization_factor_store()
2134 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in eviction_thld_enter_store() local
2137 if (!hpb) in eviction_thld_enter_store()
2140 if (!hpb->is_hcm) in eviction_thld_enter_store()
2146 if (val <= hpb->params.eviction_thld_exit) in eviction_thld_enter_store()
2149 hpb->params.eviction_thld_enter = val; in eviction_thld_enter_store()
2161 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in eviction_thld_exit_store() local
2164 if (!hpb) in eviction_thld_exit_store()
2167 if (!hpb->is_hcm) in eviction_thld_exit_store()
2173 if (val <= hpb->params.activation_thld) in eviction_thld_exit_store()
2176 hpb->params.eviction_thld_exit = val; in eviction_thld_exit_store()
2188 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in read_timeout_ms_store() local
2191 if (!hpb) in read_timeout_ms_store()
2194 if (!hpb->is_hcm) in read_timeout_ms_store()
2201 if (val < hpb->params.timeout_polling_interval_ms * 2) in read_timeout_ms_store()
2204 hpb->params.read_timeout_ms = val; in read_timeout_ms_store()
2216 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in read_timeout_expiries_store() local
2219 if (!hpb) in read_timeout_expiries_store()
2222 if (!hpb->is_hcm) in read_timeout_expiries_store()
2231 hpb->params.read_timeout_expiries = val; in read_timeout_expiries_store()
2244 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in timeout_polling_interval_ms_store() local
2247 if (!hpb) in timeout_polling_interval_ms_store()
2250 if (!hpb->is_hcm) in timeout_polling_interval_ms_store()
2257 if (val <= 0 || val > hpb->params.read_timeout_ms / 2) in timeout_polling_interval_ms_store()
2260 hpb->params.timeout_polling_interval_ms = val; in timeout_polling_interval_ms_store()
2272 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in inflight_map_req_store() local
2275 if (!hpb) in inflight_map_req_store()
2278 if (!hpb->is_hcm) in inflight_map_req_store()
2284 if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) in inflight_map_req_store()
2287 hpb->params.inflight_map_req = val; in inflight_map_req_store()
2294 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) in ufshpb_hcm_param_init() argument
2296 hpb->params.activation_thld = ACTIVATION_THRESHOLD; in ufshpb_hcm_param_init()
2297 hpb->params.normalization_factor = 1; in ufshpb_hcm_param_init()
2298 hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); in ufshpb_hcm_param_init()
2299 hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); in ufshpb_hcm_param_init()
2300 hpb->params.read_timeout_ms = READ_TO_MS; in ufshpb_hcm_param_init()
2301 hpb->params.read_timeout_expiries = READ_TO_EXPIRIES; in ufshpb_hcm_param_init()
2302 hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS; in ufshpb_hcm_param_init()
2303 hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT; in ufshpb_hcm_param_init()
2324 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) in ufshpb_pre_req_mempool_init() argument
2327 int qd = hpb->sdev_ufs_lu->queue_depth / 2; in ufshpb_pre_req_mempool_init()
2330 INIT_LIST_HEAD(&hpb->lh_pre_req_free); in ufshpb_pre_req_mempool_init()
2332 hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL); in ufshpb_pre_req_mempool_init()
2333 hpb->throttle_pre_req = qd; in ufshpb_pre_req_mempool_init()
2334 hpb->num_inflight_pre_req = 0; in ufshpb_pre_req_mempool_init()
2336 if (!hpb->pre_req) in ufshpb_pre_req_mempool_init()
2340 pre_req = hpb->pre_req + i; in ufshpb_pre_req_mempool_init()
2354 list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); in ufshpb_pre_req_mempool_init()
2359 list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) { in ufshpb_pre_req_mempool_init()
2365 kfree(hpb->pre_req); in ufshpb_pre_req_mempool_init()
2369 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb) in ufshpb_pre_req_mempool_destroy() argument
2374 for (i = 0; i < hpb->throttle_pre_req; i++) { in ufshpb_pre_req_mempool_destroy()
2375 pre_req = hpb->pre_req + i; in ufshpb_pre_req_mempool_destroy()
2376 bio_put(hpb->pre_req[i].bio); in ufshpb_pre_req_mempool_destroy()
2378 __free_page(hpb->pre_req[i].wb.m_page); in ufshpb_pre_req_mempool_destroy()
2382 kfree(hpb->pre_req); in ufshpb_pre_req_mempool_destroy()
2385 static void ufshpb_stat_init(struct ufshpb_lu *hpb) in ufshpb_stat_init() argument
2387 hpb->stats.hit_cnt = 0; in ufshpb_stat_init()
2388 hpb->stats.miss_cnt = 0; in ufshpb_stat_init()
2389 hpb->stats.rb_noti_cnt = 0; in ufshpb_stat_init()
2390 hpb->stats.rb_active_cnt = 0; in ufshpb_stat_init()
2391 hpb->stats.rb_inactive_cnt = 0; in ufshpb_stat_init()
2392 hpb->stats.map_req_cnt = 0; in ufshpb_stat_init()
2393 hpb->stats.umap_req_cnt = 0; in ufshpb_stat_init()
2396 static void ufshpb_param_init(struct ufshpb_lu *hpb) in ufshpb_param_init() argument
2398 hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; in ufshpb_param_init()
2399 if (hpb->is_hcm) in ufshpb_param_init()
2400 ufshpb_hcm_param_init(hpb); in ufshpb_param_init()
2403 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) in ufshpb_lu_hpb_init() argument
2407 spin_lock_init(&hpb->rgn_state_lock); in ufshpb_lu_hpb_init()
2408 spin_lock_init(&hpb->rsp_list_lock); in ufshpb_lu_hpb_init()
2409 spin_lock_init(&hpb->param_lock); in ufshpb_lu_hpb_init()
2411 INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); in ufshpb_lu_hpb_init()
2412 INIT_LIST_HEAD(&hpb->lh_act_srgn); in ufshpb_lu_hpb_init()
2413 INIT_LIST_HEAD(&hpb->lh_inact_rgn); in ufshpb_lu_hpb_init()
2414 INIT_LIST_HEAD(&hpb->list_hpb_lu); in ufshpb_lu_hpb_init()
2416 INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); in ufshpb_lu_hpb_init()
2417 if (hpb->is_hcm) { in ufshpb_lu_hpb_init()
2418 INIT_WORK(&hpb->ufshpb_normalization_work, in ufshpb_lu_hpb_init()
2420 INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, in ufshpb_lu_hpb_init()
2424 hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", in ufshpb_lu_hpb_init()
2426 if (!hpb->map_req_cache) { in ufshpb_lu_hpb_init()
2428 hpb->lun); in ufshpb_lu_hpb_init()
2432 hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache", in ufshpb_lu_hpb_init()
2433 sizeof(struct page *) * hpb->pages_per_srgn, in ufshpb_lu_hpb_init()
2435 if (!hpb->m_page_cache) { in ufshpb_lu_hpb_init()
2437 hpb->lun); in ufshpb_lu_hpb_init()
2442 ret = ufshpb_pre_req_mempool_init(hpb); in ufshpb_lu_hpb_init()
2445 hpb->lun); in ufshpb_lu_hpb_init()
2449 ret = ufshpb_alloc_region_tbl(hba, hpb); in ufshpb_lu_hpb_init()
2453 ufshpb_stat_init(hpb); in ufshpb_lu_hpb_init()
2454 ufshpb_param_init(hpb); in ufshpb_lu_hpb_init()
2456 if (hpb->is_hcm) { in ufshpb_lu_hpb_init()
2459 poll = hpb->params.timeout_polling_interval_ms; in ufshpb_lu_hpb_init()
2460 schedule_delayed_work(&hpb->ufshpb_read_to_work, in ufshpb_lu_hpb_init()
2467 ufshpb_pre_req_mempool_destroy(hpb); in ufshpb_lu_hpb_init()
2469 kmem_cache_destroy(hpb->m_page_cache); in ufshpb_lu_hpb_init()
2471 kmem_cache_destroy(hpb->map_req_cache); in ufshpb_lu_hpb_init()
2480 struct ufshpb_lu *hpb; in ufshpb_alloc_hpb_lu() local
2483 hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); in ufshpb_alloc_hpb_lu()
2484 if (!hpb) in ufshpb_alloc_hpb_lu()
2487 hpb->lun = sdev->lun; in ufshpb_alloc_hpb_lu()
2488 hpb->sdev_ufs_lu = sdev; in ufshpb_alloc_hpb_lu()
2490 ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); in ufshpb_alloc_hpb_lu()
2492 ret = ufshpb_lu_hpb_init(hba, hpb); in ufshpb_alloc_hpb_lu()
2498 sdev->hostdata = hpb; in ufshpb_alloc_hpb_lu()
2499 return hpb; in ufshpb_alloc_hpb_lu()
2502 kfree(hpb); in ufshpb_alloc_hpb_lu()
2506 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) in ufshpb_discard_rsp_lists() argument
2517 spin_lock_irqsave(&hpb->rsp_list_lock, flags); in ufshpb_discard_rsp_lists()
2518 list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, in ufshpb_discard_rsp_lists()
2522 list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, in ufshpb_discard_rsp_lists()
2525 spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); in ufshpb_discard_rsp_lists()
2528 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) in ufshpb_cancel_jobs() argument
2530 if (hpb->is_hcm) { in ufshpb_cancel_jobs()
2531 cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); in ufshpb_cancel_jobs()
2532 cancel_work_sync(&hpb->ufshpb_normalization_work); in ufshpb_cancel_jobs()
2534 cancel_work_sync(&hpb->map_work); in ufshpb_cancel_jobs()
2576 struct ufshpb_lu *hpb; in ufshpb_reset() local
2580 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_reset()
2581 if (!hpb) in ufshpb_reset()
2584 if (ufshpb_get_state(hpb) != HPB_RESET) in ufshpb_reset()
2587 ufshpb_set_state(hpb, HPB_PRESENT); in ufshpb_reset()
2593 struct ufshpb_lu *hpb; in ufshpb_reset_host() local
2597 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_reset_host()
2598 if (!hpb) in ufshpb_reset_host()
2601 if (ufshpb_get_state(hpb) != HPB_PRESENT) in ufshpb_reset_host()
2603 ufshpb_set_state(hpb, HPB_RESET); in ufshpb_reset_host()
2604 ufshpb_cancel_jobs(hpb); in ufshpb_reset_host()
2605 ufshpb_discard_rsp_lists(hpb); in ufshpb_reset_host()
2611 struct ufshpb_lu *hpb; in ufshpb_suspend() local
2615 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_suspend()
2616 if (!hpb) in ufshpb_suspend()
2619 if (ufshpb_get_state(hpb) != HPB_PRESENT) in ufshpb_suspend()
2621 ufshpb_set_state(hpb, HPB_SUSPEND); in ufshpb_suspend()
2622 ufshpb_cancel_jobs(hpb); in ufshpb_suspend()
2628 struct ufshpb_lu *hpb; in ufshpb_resume() local
2632 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_resume()
2633 if (!hpb) in ufshpb_resume()
2636 if ((ufshpb_get_state(hpb) != HPB_PRESENT) && in ufshpb_resume()
2637 (ufshpb_get_state(hpb) != HPB_SUSPEND)) in ufshpb_resume()
2639 ufshpb_set_state(hpb, HPB_PRESENT); in ufshpb_resume()
2640 ufshpb_kick_map_work(hpb); in ufshpb_resume()
2641 if (hpb->is_hcm) { in ufshpb_resume()
2643 hpb->params.timeout_polling_interval_ms; in ufshpb_resume()
2645 schedule_delayed_work(&hpb->ufshpb_read_to_work, in ufshpb_resume()
2700 struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); in ufshpb_destroy_lu() local
2702 if (!hpb) in ufshpb_destroy_lu()
2705 ufshpb_set_state(hpb, HPB_FAILED); in ufshpb_destroy_lu()
2707 sdev = hpb->sdev_ufs_lu; in ufshpb_destroy_lu()
2710 ufshpb_cancel_jobs(hpb); in ufshpb_destroy_lu()
2712 ufshpb_pre_req_mempool_destroy(hpb); in ufshpb_destroy_lu()
2713 ufshpb_destroy_region_tbl(hpb); in ufshpb_destroy_lu()
2715 kmem_cache_destroy(hpb->map_req_cache); in ufshpb_destroy_lu()
2716 kmem_cache_destroy(hpb->m_page_cache); in ufshpb_destroy_lu()
2718 list_del_init(&hpb->list_hpb_lu); in ufshpb_destroy_lu()
2720 kfree(hpb); in ufshpb_destroy_lu()
2726 struct ufshpb_lu *hpb; in ufshpb_hpb_lu_prepared() local
2744 hpb = ufshpb_get_hpb_data(sdev); in ufshpb_hpb_lu_prepared()
2745 if (!hpb) in ufshpb_hpb_lu_prepared()
2749 ufshpb_set_state(hpb, HPB_PRESENT); in ufshpb_hpb_lu_prepared()
2750 if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) in ufshpb_hpb_lu_prepared()
2751 queue_work(ufshpb_wq, &hpb->map_work); in ufshpb_hpb_lu_prepared()
2752 if (!hpb->is_hcm) in ufshpb_hpb_lu_prepared()
2753 ufshpb_issue_umap_all_req(hpb); in ufshpb_hpb_lu_prepared()
2755 dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); in ufshpb_hpb_lu_prepared()
2766 struct ufshpb_lu *hpb; in ufshpb_init_hpb_lu() local
2778 hpb = ufshpb_alloc_hpb_lu(hba, sdev, ufs_hba_to_hpb(hba), &hpb_lu_info); in ufshpb_init_hpb_lu()
2779 if (!hpb) in ufshpb_init_hpb_lu()
2783 hpb->srgns_per_rgn * hpb->pages_per_srgn; in ufshpb_init_hpb_lu()