// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Flash Storage Host Performance Booster
 *
 * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *	Yongmyung Lee <ymhungry.lee@samsung.com>
 *	Jinyoung Choi <j-young.choi@samsung.com>
 */

#include <asm/unaligned.h>
#include <linux/async.h>

#include "ufshcd.h"
#include "ufshcd-add-info.h"
#include "ufshpb.h"
#include "../sd.h"

#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
#define READ_TO_MS 1000
#define READ_TO_EXPIRIES 100
#define POLLING_INTERVAL_MS 200
#define THROTTLE_MAP_REQ_DEFAULT 1

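/*
 * The macros above look like the defaults for the host control mode (HCM)
 * tunables used throughout this file (activation_thld, read_timeout_ms,
 * read_timeout_expiries, timeout_polling_interval_ms, inflight_map_req in
 * hpb->params); the actual wiring happens in the LU initialization code,
 * which is outside this excerpt.
 */
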
/* memory management */
static struct kmem_cache *ufshpb_mctx_cache;
static mempool_t *ufshpb_mctx_pool;
static mempool_t *ufshpb_page_pool;
/* A 2MB cache can hold ppn entries covering a 1GB range. */
static unsigned int ufshpb_host_map_kbytes = 2048;
static int tot_active_srgn_pages;

static struct workqueue_struct *ufshpb_wq;

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
				      int srgn_idx);

static inline struct ufshpb_dev_info *ufs_hba_to_hpb(struct ufs_hba *hba)
{
	return &ufs_hba_add_info(hba)->hpb_dev;
}

bool ufshpb_is_allowed(struct ufs_hba *hba)
{
	return !(ufs_hba_to_hpb(hba)->hpb_disabled);
}

/* HPB version 1.0 is called the legacy version. */
bool ufshpb_is_legacy(struct ufs_hba *hba)
{
	return ufs_hba_to_hpb(hba)->is_legacy;
}

static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
{
	return sdev->hostdata;
}

static int ufshpb_get_state(struct ufshpb_lu *hpb)
{
	return atomic_read(&hpb->hpb_state);
}

static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
{
	atomic_set(&hpb->hpb_state, state);
}

static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
				struct ufshpb_subregion *srgn)
{
	return rgn->rgn_state != HPB_RGN_INACTIVE &&
		srgn->srgn_state == HPB_SRGN_VALID;
}

static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
{
	return req_op(cmd->request) == REQ_OP_READ;
}

static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
{
	return op_is_write(req_op(cmd->request)) ||
	       op_is_discard(req_op(cmd->request));
}

static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
{
	return transfer_len <= hpb->pre_req_max_tr_len;
}

/*
 * In this driver, the WRITE_BUFFER command supports 36KB (len=9) to
 * 1MB (len=256) by default. The transfer_len range can be changed
 * through sysfs.
 */
static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
{
	return len > hpb->pre_req_min_tr_len &&
	       len <= hpb->pre_req_max_tr_len;
}

static bool ufshpb_is_general_lun(int lun)
{
	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
}

static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
{
	if (hpb->lu_pinned_end != PINNED_NOT_SET &&
	    rgn_idx >= hpb->lu_pinned_start &&
	    rgn_idx <= hpb->lu_pinned_end)
		return true;

	return false;
}

static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
{
	bool ret = false;
	unsigned long flags;

	if (ufshpb_get_state(hpb) != HPB_PRESENT)
		return;

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
		ret = true;
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

	if (ret)
		queue_work(ufshpb_wq, &hpb->map_work);
}

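/*
 * Sanity-check the HPB sense data that the device piggybacks on a response
 * UPIU: the HPB_UPDATE_ALERT bit must be set in the UPIU header, the sense
 * segment must have the expected length and descriptor type, and the
 * active/inactive region counts must be within bounds for a general LU.
 */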
static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
				    struct ufshcd_lrb *lrbp,
				    struct utp_hpb_rsp *rsp_field)
{
	/* Check HPB_UPDATE_ALERT */
	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
		return false;

	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
	    rsp_field->desc_type != DEV_DES_TYPE ||
	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
	    rsp_field->hpb_op == HPB_RSP_NONE ||
	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
		return false;

	if (!ufshpb_is_general_lun(rsp_field->lun)) {
		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
			 lrbp->lun);
		return false;
	}

	return true;
}

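/*
 * Walk every subregion covered by the range of @cnt entries that starts at
 * @srgn_offset within (@rgn_idx, @srgn_idx). For writes and discards
 * (@set_dirty) the covered ppn entries are marked dirty; for reads in host
 * control mode the per-region read counters are bumped and the subregion is
 * queued for activation once its read count hits the activation threshold.
 */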
static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
			       int srgn_offset, int cnt, bool set_dirty)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn, *prev_srgn = NULL;
	int set_bit_len;
	int bitmap_len;
	unsigned long flags;

next_srgn:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if ((srgn_offset + cnt) > bitmap_len)
		set_bit_len = bitmap_len - srgn_offset;
	else
		set_bit_len = cnt;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
		if (set_dirty) {
			if (srgn->srgn_state == HPB_SRGN_VALID)
				bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
					   set_bit_len);
		} else if (hpb->is_hcm) {
			/* rewind the read timer for lru regions */
			rgn->read_timeout = ktime_add_ms(ktime_get(),
					rgn->hpb->params.read_timeout_ms);
			rgn->read_timeout_expiries =
				rgn->hpb->params.read_timeout_expiries;
		}
	}
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	if (hpb->is_hcm && prev_srgn != srgn) {
		bool activate = false;

		spin_lock(&rgn->rgn_lock);
		if (set_dirty) {
			rgn->reads -= srgn->reads;
			srgn->reads = 0;
			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
		} else {
			srgn->reads++;
			rgn->reads++;
			if (srgn->reads == hpb->params.activation_thld)
				activate = true;
		}
		spin_unlock(&rgn->rgn_lock);

		if (activate ||
		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
				"activate region %d-%d\n", rgn_idx, srgn_idx);
		}

		prev_srgn = srgn;
	}

	srgn_offset = 0;
	if (++srgn_idx == hpb->srgns_per_rgn) {
		srgn_idx = 0;
		rgn_idx++;
	}

	cnt -= set_bit_len;
	if (cnt > 0)
		goto next_srgn;
}

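/*
 * Return true if any ppn entry in the given LPN range cannot be served from
 * the HPB cache: the subregion is not valid, its map context is missing, or
 * at least one covered entry is marked dirty.
 */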
static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
				  int srgn_idx, int srgn_offset, int cnt)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	int bitmap_len;
	int bit_len;

next_srgn:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (likely(!srgn->is_last))
		bitmap_len = hpb->entries_per_srgn;
	else
		bitmap_len = hpb->last_srgn_entries;

	if (!ufshpb_is_valid_srgn(rgn, srgn))
		return true;

	/*
	 * If the region state is active, mctx must be allocated.
	 * In this case, check whether the region was evicted or
	 * the mctx allocation failed.
	 */
	if (unlikely(!srgn->mctx)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);
		return true;
	}

	if ((srgn_offset + cnt) > bitmap_len)
		bit_len = bitmap_len - srgn_offset;
	else
		bit_len = cnt;

	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
			  srgn_offset) < bit_len + srgn_offset)
		return true;

	srgn_offset = 0;
	if (++srgn_idx == hpb->srgns_per_rgn) {
		srgn_idx = 0;
		rgn_idx++;
	}

	cnt -= bit_len;
	if (cnt > 0)
		goto next_srgn;

	return false;
}

static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
{
	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
}

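/*
 * Copy up to @len ppn entries, starting at entry @pos, from the map
 * context's page array into @ppn_buf. A single call never crosses a page
 * boundary; the number of entries actually copied is returned, so callers
 * loop until they have all the entries they need.
 */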
static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
				     struct ufshpb_map_ctx *mctx, int pos,
				     int len, __be64 *ppn_buf)
{
	struct page *page;
	int index, offset;
	int copied;

	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);

	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
		copied = len;
	else
		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;

	page = mctx->m_page[index];
	if (unlikely(!page)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"error. cannot find page in mctx\n");
		return -ENOMEM;
	}

	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
	       copied * HPB_ENTRY_SIZE);

	return copied;
}

static void
ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
			int *srgn_idx, int *offset)
{
	int rgn_offset;

	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
	rgn_offset = lpn & hpb->entries_per_rgn_mask;
	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
	*offset = rgn_offset & hpb->entries_per_srgn_mask;
}

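/*
 * Rewrite the READ CDB in place as a 16-byte HPB READ. The fields set here:
 *   byte 0      UFSHPB_READ opcode
 *   bytes 6-13  ppn entry (big-endian; byte-swapped first on devices with
 *               the SWAP_L2P_ENTRY quirk)
 *   byte 14     transfer length in logical blocks
 *   byte 15     read id matching a preceding WRITE_BUFFER pre-request
 */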
static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
			    struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
			    u8 transfer_len, int read_id)
{
	unsigned char *cdb = lrbp->cmd->cmnd;
	__be64 ppn_tmp = ppn;

	cdb[0] = UFSHPB_READ;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
		ppn_tmp = swab64(ppn);

	/* ppn value is stored as big-endian in the host memory */
	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
	cdb[14] = transfer_len;
	cdb[15] = read_id;

	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
}

static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
					    unsigned long lpn, unsigned int len,
					    int read_id)
{
	cdb[0] = UFSHPB_WRITE_BUFFER;
	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;

	put_unaligned_be32(lpn, &cdb[2]);
	cdb[6] = read_id;
	put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);

	cdb[9] = 0x00;	/* Control = 0x00 */
}

static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
{
	struct ufshpb_req *pre_req;

	if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
			 "pre_req throttle. inflight %d throttle %d",
			 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
		return NULL;
	}

	pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
					   struct ufshpb_req, list_req);
	if (!pre_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
		return NULL;
	}

	list_del_init(&pre_req->list_req);
	hpb->num_inflight_pre_req++;

	return pre_req;
}

static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
				      struct ufshpb_req *pre_req)
{
	pre_req->req = NULL;
	bio_reset(pre_req->bio);
	list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
	hpb->num_inflight_pre_req--;
}

static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
{
	struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
	struct ufshpb_lu *hpb = pre_req->hpb;
	unsigned long flags;

	if (error) {
		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
		struct scsi_sense_hdr sshdr;

		dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
		scsi_command_normalize_sense(cmd, &sshdr);
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"code %x sense_key %x asc %x ascq %x",
			sshdr.response_code,
			sshdr.sense_key, sshdr.asc, sshdr.ascq);
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"byte4 %x byte5 %x byte6 %x additional_len %x",
			sshdr.byte4, sshdr.byte5,
			sshdr.byte6, sshdr.additional_length);
	}

	blk_mq_free_request(req);
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_put_pre_req(pre_req->hpb, pre_req);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
}

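/*
 * Fill the pre-request page with the ppn entries covering the upcoming
 * read: walk the (region, subregion) chain for pre_req->wb.len entries
 * starting at pre_req->wb.lpn, copying from each subregion's map context.
 * Fails with -ENOMEM as soon as any covered subregion is invalid or has
 * no mctx.
 */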
static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
{
	struct ufshpb_lu *hpb = pre_req->hpb;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	__be64 *addr;
	int offset = 0;
	int copied;
	unsigned long lpn = pre_req->wb.lpn;
	int rgn_idx, srgn_idx, srgn_offset;
	unsigned long flags;

	addr = page_address(page);
	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

next_offset:
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	if (!ufshpb_is_valid_srgn(rgn, srgn))
		goto mctx_error;

	if (!srgn->mctx)
		goto mctx_error;

	copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
					   pre_req->wb.len - offset,
					   &addr[offset]);

	if (copied < 0)
		goto mctx_error;

	offset += copied;
	srgn_offset += copied;

	if (srgn_offset == hpb->entries_per_srgn) {
		srgn_offset = 0;

		if (++srgn_idx == hpb->srgns_per_rgn) {
			srgn_idx = 0;
			rgn_idx++;
		}
	}

	if (offset < pre_req->wb.len)
		goto next_offset;

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	return 0;
mctx_error:
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	return -ENOMEM;
}

static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
				       struct request_queue *q,
				       struct ufshpb_req *pre_req)
{
	struct page *page = pre_req->wb.m_page;
	struct bio *bio = pre_req->bio;
	int entries_bytes, ret;

	if (!page)
		return -ENOMEM;

	if (ufshpb_prep_entry(pre_req, page))
		return -ENOMEM;

	entries_bytes = pre_req->wb.len * sizeof(__be64);

	ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
	if (ret != entries_bytes) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"bio_add_pc_page fail: %d", ret);
		return -ENOMEM;
	}
	return 0;
}

static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
{
	if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
		hpb->cur_read_id = 1;
	return hpb->cur_read_id;
}

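/*
 * Issue the HPB WRITE_BUFFER "pre-request": a cloned passthrough request
 * that pushes the ppn entries for the upcoming read down to the device
 * ahead of the HPB READ that carries the matching read_id.
 */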
static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
				  struct ufshpb_req *pre_req, int read_id)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;
	struct request *req;
	struct scsi_request *rq;
	struct bio *bio = pre_req->bio;

	pre_req->hpb = hpb;
	pre_req->wb.lpn = sectors_to_logical(cmd->device,
					     blk_rq_pos(cmd->request));
	pre_req->wb.len = sectors_to_logical(cmd->device,
					     blk_rq_sectors(cmd->request));
	if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
		return -ENOMEM;

	req = pre_req->req;

	/* 1. request setup */
	blk_rq_append_bio(req, &bio);
	req->rq_disk = NULL;
	req->end_io_data = (void *)pre_req;
	req->end_io = ufshpb_pre_req_compl_fn;

	/* 2. scsi_request setup */
	rq = scsi_req(req);
	rq->retries = 1;

	ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
				 read_id);
	rq->cmd_len = scsi_command_size(rq->cmd);

	if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
		return -EAGAIN;

	hpb->stats.pre_req_cnt++;

	return 0;
}

static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
				int *read_id)
{
	struct ufshpb_req *pre_req;
	struct request *req = NULL;
	unsigned long flags;
	int _read_id;
	int ret = 0;

	req = blk_get_request(cmd->device->request_queue,
			      REQ_OP_SCSI_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return -EAGAIN;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	pre_req = ufshpb_get_pre_req(hpb);
	if (!pre_req) {
		ret = -EAGAIN;
		goto unlock_out;
	}
	_read_id = ufshpb_get_read_id(hpb);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	pre_req->req = req;

	ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
	if (ret)
		goto free_pre_req;

	*read_id = _read_id;

	return ret;
free_pre_req:
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_put_pre_req(hpb, pre_req);
unlock_out:
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	blk_put_request(req);
	return ret;
}

/*
 * This function will set up HPB read command using host-side L2P map data.
 */
int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufshpb_lu *hpb;
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	struct scsi_cmnd *cmd = lrbp->cmd;
	u32 lpn;
	__be64 ppn;
	unsigned long flags;
	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
	int read_id = 0;
	int err = 0;

	hpb = ufshpb_get_hpb_data(cmd->device);
	if (!hpb)
		return -ENODEV;

	if (ufshpb_get_state(hpb) == HPB_INIT)
		return -ENODEV;

	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
			   "%s: ufshpb state is not PRESENT", __func__);
		return -ENODEV;
	}

	if (blk_rq_is_scsi(cmd->request) ||
	    (!ufshpb_is_write_or_discard(cmd) &&
	     !ufshpb_is_read_cmd(cmd)))
		return 0;

	transfer_len = sectors_to_logical(cmd->device,
					  blk_rq_sectors(cmd->request));
	if (unlikely(!transfer_len))
		return 0;

	lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	/* If the command type is WRITE or DISCARD, set the bitmap as dirty */
	if (ufshpb_is_write_or_discard(cmd)) {
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, true);
		return 0;
	}

	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
		return 0;

	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);

	if (hpb->is_hcm) {
		/*
		 * in host control mode, reads are the main source for
		 * activation trials.
		 */
		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len, false);

		/* keep those counters normalized */
		if (rgn->reads > hpb->entries_per_srgn)
			schedule_work(&hpb->ufshpb_normalization_work);
	}

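	/*
	 * Serve the read via HPB only if every covered ppn entry is clean;
	 * a single dirty entry means the device-side L2P mapping may have
	 * moved, so the command is left as a normal READ and counted as
	 * a miss.
	 */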
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
				   transfer_len)) {
		hpb->stats.miss_cnt++;
		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
		return 0;
	}

	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
	if (unlikely(err < 0)) {
		/*
		 * In this case, the region state is active,
		 * but the ppn table is not allocated.
		 * The ppn table must always be allocated while
		 * the region is active.
		 */
		dev_err(hba->dev, "get ppn failed. err %d\n", err);
		return err;
	}

	if (!ufshpb_is_legacy(hba) &&
	    ufshpb_is_required_wb(hpb, transfer_len)) {
		err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
		if (err) {
			unsigned long timeout;

			timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
				  hpb->params.requeue_timeout_ms);

			if (time_before(jiffies, timeout))
				return -EAGAIN;

			hpb->stats.miss_cnt++;
			return 0;
		}
	}

	ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
				    read_id);

	hpb->stats.hit_cnt++;
	return 0;
}

static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
					 int rgn_idx, enum req_opf dir,
					 bool atomic)
{
	struct ufshpb_req *rq;
	struct request *req;
	int retries = HPB_MAP_REQ_RETRIES;

	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
	if (!rq)
		return NULL;

retry:
	req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
			      BLK_MQ_REQ_NOWAIT);

	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
		usleep_range(3000, 3100);
		goto retry;
	}

	if (IS_ERR(req))
		goto free_rq;

	rq->hpb = hpb;
	rq->req = req;
	rq->rb.rgn_idx = rgn_idx;

	return rq;

free_rq:
	kmem_cache_free(hpb->map_req_cache, rq);
	return NULL;
}

static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
	blk_put_request(rq->req);
	kmem_cache_free(hpb->map_req_cache, rq);
}

static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
					     struct ufshpb_subregion *srgn)
{
	struct ufshpb_req *map_req;
	struct bio *bio;
	unsigned long flags;

	if (hpb->is_hcm &&
	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
			 "map_req throttle. inflight %d throttle %d",
			 hpb->num_inflight_map_req,
			 hpb->params.inflight_map_req);
		return NULL;
	}

	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false);
	if (!map_req)
		return NULL;

	bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
	if (!bio) {
		ufshpb_put_req(hpb, map_req);
		return NULL;
	}

	map_req->bio = bio;

	map_req->rb.srgn_idx = srgn->srgn_idx;
	map_req->rb.mctx = srgn->mctx;

	spin_lock_irqsave(&hpb->param_lock, flags);
	hpb->num_inflight_map_req++;
	spin_unlock_irqrestore(&hpb->param_lock, flags);

	return map_req;
}

static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
			       struct ufshpb_req *map_req)
{
	unsigned long flags;

	bio_put(map_req->bio);
	ufshpb_put_req(hpb, map_req);

	spin_lock_irqsave(&hpb->param_lock, flags);
	hpb->num_inflight_map_req--;
	spin_unlock_irqrestore(&hpb->param_lock, flags);
}

static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
				     struct ufshpb_subregion *srgn)
{
	struct ufshpb_region *rgn;
	u32 num_entries = hpb->entries_per_srgn;

	if (!srgn->mctx) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);
		return -1;
	}

	if (unlikely(srgn->is_last))
		num_entries = hpb->last_srgn_entries;

	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);

	rgn = hpb->rgn_tbl + srgn->rgn_idx;
	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);

	return 0;
}

static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
				      int srgn_idx)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;

	rgn = hpb->rgn_tbl + rgn_idx;
	srgn = rgn->srgn_tbl + srgn_idx;

	list_del_init(&rgn->list_inact_rgn);

	if (list_empty(&srgn->list_act_srgn))
		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);

	hpb->stats.rb_active_cnt++;
}

static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
{
	struct ufshpb_region *rgn;
	struct ufshpb_subregion *srgn;
	int srgn_idx;

	rgn = hpb->rgn_tbl + rgn_idx;

	for_each_sub_region(rgn, srgn_idx, srgn)
		list_del_init(&srgn->list_act_srgn);

	if (list_empty(&rgn->list_inact_rgn))
		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);

	hpb->stats.rb_inactive_cnt++;
}

static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
				      struct ufshpb_subregion *srgn)
{
	struct ufshpb_region *rgn;

	/*
	 * If the subregion has no mctx after the I/O for HPB_READ_BUFFER
	 * has made progress, the region it belongs to was evicted in the
	 * meantime. The region must not be evicted while I/O is in progress.
	 */
	if (!srgn->mctx) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"no mctx in region %d subregion %d.\n",
			srgn->rgn_idx, srgn->srgn_idx);
		srgn->srgn_state = HPB_SRGN_INVALID;
		return;
	}

	rgn = hpb->rgn_tbl + srgn->rgn_idx;

	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
			"region %d subregion %d evicted\n",
			srgn->rgn_idx, srgn->srgn_idx);
		srgn->srgn_state = HPB_SRGN_INVALID;
		return;
	}
	srgn->srgn_state = HPB_SRGN_VALID;
}

static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
{
	struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;

	ufshpb_put_req(umap_req->hpb, umap_req);
}

static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
{
	struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
	struct ufshpb_lu *hpb = map_req->hpb;
	struct ufshpb_subregion *srgn;
	unsigned long flags;

	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
		map_req->rb.srgn_idx;

	ufshpb_clear_dirty_bitmap(hpb, srgn);
	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
	ufshpb_activate_subregion(hpb, srgn);
	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	ufshpb_put_map_req(map_req->hpb, map_req);
}

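/*
 * Build the HPB WRITE_BUFFER CDB used for inactivation: a NULL @rgn means
 * "inactivate all regions", otherwise the single region index goes into
 * bytes 2-3.
 */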
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
{
	cdb[0] = UFSHPB_WRITE_BUFFER;
	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
			  UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
	if (rgn)
		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
	cdb[9] = 0x00;
}

static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
				    int srgn_idx, int srgn_mem_size)
{
	cdb[0] = UFSHPB_READ_BUFFER;
	cdb[1] = UFSHPB_READ_BUFFER_ID;

	put_unaligned_be16(rgn_idx, &cdb[2]);
	put_unaligned_be16(srgn_idx, &cdb[4]);
	put_unaligned_be24(srgn_mem_size, &cdb[6]);

	cdb[9] = 0x00;
}

static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
				   struct ufshpb_req *umap_req,
				   struct ufshpb_region *rgn)
{
	struct request *req;
	struct scsi_request *rq;

	req = umap_req->req;
	req->timeout = 0;
	req->end_io_data = (void *)umap_req;
	rq = scsi_req(req);
	ufshpb_set_unmap_cmd(rq->cmd, rgn);
	rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;

	blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn);

	hpb->stats.umap_req_cnt++;
}

static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
				  struct ufshpb_req *map_req, bool last)
{
	struct request_queue *q;
	struct request *req;
	struct scsi_request *rq;
	int mem_size = hpb->srgn_mem_size;
	int ret = 0;
	int i;

	q = hpb->sdev_ufs_lu->request_queue;
	for (i = 0; i < hpb->pages_per_srgn; i++) {
		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
				      PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
				   "bio_add_pc_page fail %d - %d\n",
				   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
			return ret;
		}
	}

	req = map_req->req;

	blk_rq_append_bio(req, &map_req->bio);

	req->end_io_data = map_req;

	rq = scsi_req(req);

	if (unlikely(last))
		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;

	ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
				map_req->rb.srgn_idx, mem_size);
	rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;

	blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);

	hpb->stats.map_req_cnt++;
	return 0;
}

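/*
 * Allocate a map context for one subregion: the page-pointer array, the
 * ppn_dirty bitmap (sized down for the last, possibly partial, subregion)
 * and pages_per_srgn zeroed pages from the shared mempool. All allocations
 * are unwound on failure.
 */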
static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
						 bool last)
{
	struct ufshpb_map_ctx *mctx;
	u32 num_entries = hpb->entries_per_srgn;
	int i, j;

	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
	if (!mctx)
		return NULL;

	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
	if (!mctx->m_page)
		goto release_mctx;

	if (unlikely(last))
		num_entries = hpb->last_srgn_entries;

	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
	if (!mctx->ppn_dirty)
		goto release_m_page;

	for (i = 0; i < hpb->pages_per_srgn; i++) {
		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
		if (!mctx->m_page[i]) {
			for (j = 0; j < i; j++)
				mempool_free(mctx->m_page[j], ufshpb_page_pool);
			goto release_ppn_dirty;
		}
		clear_page(page_address(mctx->m_page[i]));
	}

	return mctx;

release_ppn_dirty:
	bitmap_free(mctx->ppn_dirty);
release_m_page:
	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
release_mctx:
	mempool_free(mctx, ufshpb_mctx_pool);
	return NULL;
}

static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
			       struct ufshpb_map_ctx *mctx)
{
	int i;

	for (i = 0; i < hpb->pages_per_srgn; i++)
		mempool_free(mctx->m_page[i], ufshpb_page_pool);

	bitmap_free(mctx->ppn_dirty);
	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
	mempool_free(mctx, ufshpb_mctx_pool);
}

static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
					  struct ufshpb_region *rgn)
{
	struct ufshpb_subregion *srgn;
	int srgn_idx;

	for_each_sub_region(rgn, srgn_idx, srgn)
		if (srgn->srgn_state == HPB_SRGN_ISSUED)
			return -EPERM;

	return 0;
}

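/*
 * Periodic worker for host control mode: scan the LRU list for regions
 * whose read timer has expired, queue dirty or repeatedly-expired regions
 * for inactivation, then re-arm itself at the polling interval.
 */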
static void ufshpb_read_to_handler(struct work_struct *work)
{
	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
					     ufshpb_read_to_work.work);
	struct victim_select_info *lru_info = &hpb->lru_info;
	struct ufshpb_region *rgn, *next_rgn;
	unsigned long flags;
	unsigned int poll;
	LIST_HEAD(expired_list);

	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
		return;

	spin_lock_irqsave(&hpb->rgn_state_lock, flags);

	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
				 list_lru_rgn) {
		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);

		if (timedout) {
			rgn->read_timeout_expiries--;
			if (is_rgn_dirty(rgn) ||
			    rgn->read_timeout_expiries == 0)
				list_add(&rgn->list_expired_rgn, &expired_list);
			else
				rgn->read_timeout = ktime_add_ms(ktime_get(),
						hpb->params.read_timeout_ms);
		}
	}

	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);

	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
				 list_expired_rgn) {
		list_del_init(&rgn->list_expired_rgn);
		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
	}

	ufshpb_kick_map_work(hpb);

	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);

	poll = hpb->params.timeout_polling_interval_ms;
	schedule_delayed_work(&hpb->ufshpb_read_to_work,
			      msecs_to_jiffies(poll));
}

static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
				struct ufshpb_region *rgn)
{
	rgn->rgn_state = HPB_RGN_ACTIVE;
	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
	atomic_inc(&lru_info->active_cnt);
	if (rgn->hpb->is_hcm) {
		rgn->read_timeout =
			ktime_add_ms(ktime_get(),
				     rgn->hpb->params.read_timeout_ms);
		rgn->read_timeout_expiries =
			rgn->hpb->params.read_timeout_expiries;
	}
}

static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
				struct ufshpb_region *rgn)
{
	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
}

ufshpb_victim_lru_info(struct ufshpb_lu * hpb)1150*4882a593Smuzhiyun static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun 	struct victim_select_info *lru_info = &hpb->lru_info;
1153*4882a593Smuzhiyun 	struct ufshpb_region *rgn, *victim_rgn = NULL;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
1156*4882a593Smuzhiyun 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
1157*4882a593Smuzhiyun 			continue;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 		/*
1160*4882a593Smuzhiyun 		 * in host control mode, verify that the exiting region
1161*4882a593Smuzhiyun 		 * has fewer reads than the exit threshold
1162*4882a593Smuzhiyun 		 */
1163*4882a593Smuzhiyun 		if (hpb->is_hcm &&
1164*4882a593Smuzhiyun 		    rgn->reads > hpb->params.eviction_thld_exit)
1165*4882a593Smuzhiyun 			continue;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 		victim_rgn = rgn;
1168*4882a593Smuzhiyun 		break;
1169*4882a593Smuzhiyun 	}
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	/* a list_for_each_entry() cursor is never NULL, so check here */
1172*4882a593Smuzhiyun 	if (!victim_rgn)
1173*4882a593Smuzhiyun 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1174*4882a593Smuzhiyun 			"%s: no victim region found\n", __func__);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return victim_rgn;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun 
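/*
 * Example for the victim selection above (defaults from
 * ufshpb_hcm_param_init(), where eviction_thld_exit is
 * ACTIVATION_THRESHOLD << 4 = 128): in host control mode an LRU region
 * with rgn->reads = 200 is skipped, while one with rgn->reads = 50 may
 * become the victim. In device control mode the read counters are not
 * consulted and the first region with no in-flight map request wins.
 */
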
ufshpb_cleanup_lru_info(struct victim_select_info * lru_info,struct ufshpb_region * rgn)1180*4882a593Smuzhiyun static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
1181*4882a593Smuzhiyun 				    struct ufshpb_region *rgn)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	list_del_init(&rgn->list_lru_rgn);
1184*4882a593Smuzhiyun 	rgn->rgn_state = HPB_RGN_INACTIVE;
1185*4882a593Smuzhiyun 	atomic_dec(&lru_info->active_cnt);
1186*4882a593Smuzhiyun }
1187*4882a593Smuzhiyun 
ufshpb_purge_active_subregion(struct ufshpb_lu * hpb,struct ufshpb_subregion * srgn)1188*4882a593Smuzhiyun static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
1189*4882a593Smuzhiyun 					  struct ufshpb_subregion *srgn)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1192*4882a593Smuzhiyun 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1193*4882a593Smuzhiyun 		srgn->srgn_state = HPB_SRGN_UNUSED;
1194*4882a593Smuzhiyun 		srgn->mctx = NULL;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
ufshpb_issue_umap_req(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,bool atomic)1198*4882a593Smuzhiyun static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
1199*4882a593Smuzhiyun 				 struct ufshpb_region *rgn,
1200*4882a593Smuzhiyun 				 bool atomic)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	struct ufshpb_req *umap_req;
1203*4882a593Smuzhiyun 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic);
1206*4882a593Smuzhiyun 	if (!umap_req)
1207*4882a593Smuzhiyun 		return -ENOMEM;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	ufshpb_execute_umap_req(hpb, umap_req, rgn);
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	return 0;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun 
ufshpb_issue_umap_single_req(struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1214*4882a593Smuzhiyun static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
1215*4882a593Smuzhiyun 					struct ufshpb_region *rgn)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	return ufshpb_issue_umap_req(hpb, rgn, true);
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun 
ufshpb_issue_umap_all_req(struct ufshpb_lu * hpb)1220*4882a593Smuzhiyun static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
1221*4882a593Smuzhiyun {
1222*4882a593Smuzhiyun 	return ufshpb_issue_umap_req(hpb, NULL, false);
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun 
__ufshpb_evict_region(struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1225*4882a593Smuzhiyun static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
1226*4882a593Smuzhiyun 				 struct ufshpb_region *rgn)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun 	struct victim_select_info *lru_info;
1229*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1230*4882a593Smuzhiyun 	int srgn_idx;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	lru_info = &hpb->lru_info;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	ufshpb_cleanup_lru_info(lru_info, rgn);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	for_each_sub_region(rgn, srgn_idx, srgn)
1239*4882a593Smuzhiyun 		ufshpb_purge_active_subregion(hpb, srgn);
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun 
ufshpb_evict_region(struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1242*4882a593Smuzhiyun static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	unsigned long flags;
1245*4882a593Smuzhiyun 	int ret = 0;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1248*4882a593Smuzhiyun 	if (rgn->rgn_state == HPB_RGN_PINNED) {
1249*4882a593Smuzhiyun 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1250*4882a593Smuzhiyun 			 "pinned region cannot drop-out. region %d\n",
1251*4882a593Smuzhiyun 			 rgn->rgn_idx);
1252*4882a593Smuzhiyun 		goto out;
1253*4882a593Smuzhiyun 	}
1254*4882a593Smuzhiyun 	if (!list_empty(&rgn->list_lru_rgn)) {
1255*4882a593Smuzhiyun 		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
1256*4882a593Smuzhiyun 			ret = -EBUSY;
1257*4882a593Smuzhiyun 			goto out;
1258*4882a593Smuzhiyun 		}
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 		if (hpb->is_hcm) {
1261*4882a593Smuzhiyun 			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1262*4882a593Smuzhiyun 			ret = ufshpb_issue_umap_single_req(hpb, rgn);
1263*4882a593Smuzhiyun 			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1264*4882a593Smuzhiyun 			if (ret)
1265*4882a593Smuzhiyun 				goto out;
1266*4882a593Smuzhiyun 		}
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 		__ufshpb_evict_region(hpb, rgn);
1269*4882a593Smuzhiyun 	}
1270*4882a593Smuzhiyun out:
1271*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1272*4882a593Smuzhiyun 	return ret;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
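/*
 * Locking note for ufshpb_evict_region(): rgn_state_lock is taken with
 * interrupts disabled, so in host control mode it is dropped around
 * ufshpb_issue_umap_single_req(), which builds and dispatches the unmap
 * request and so cannot safely run under the spinlock. This opens a
 * small window in which the region state may change; a failed unmap is
 * handled by bailing out before the actual eviction.
 */
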
ufshpb_issue_map_req(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,struct ufshpb_subregion * srgn)1275*4882a593Smuzhiyun static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
1276*4882a593Smuzhiyun 				struct ufshpb_region *rgn,
1277*4882a593Smuzhiyun 				struct ufshpb_subregion *srgn)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun 	struct ufshpb_req *map_req;
1280*4882a593Smuzhiyun 	unsigned long flags;
1281*4882a593Smuzhiyun 	int ret;
1282*4882a593Smuzhiyun 	int err = -EAGAIN;
1283*4882a593Smuzhiyun 	bool alloc_required = false;
1284*4882a593Smuzhiyun 	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1289*4882a593Smuzhiyun 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1290*4882a593Smuzhiyun 			   "%s: ufshpb state is not PRESENT\n", __func__);
1291*4882a593Smuzhiyun 		goto unlock_out;
1292*4882a593Smuzhiyun 	}
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1295*4882a593Smuzhiyun 	    (srgn->srgn_state == HPB_SRGN_INVALID)) {
1296*4882a593Smuzhiyun 		err = 0;
1297*4882a593Smuzhiyun 		goto unlock_out;
1298*4882a593Smuzhiyun 	}
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	if (srgn->srgn_state == HPB_SRGN_UNUSED)
1301*4882a593Smuzhiyun 		alloc_required = true;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	/*
1304*4882a593Smuzhiyun 	 * If the subregion is already in the ISSUED state, a device-side
1305*4882a593Smuzhiyun 	 * event (e.g. GC or wear-leveling) has occurred and an HPB
1306*4882a593Smuzhiyun 	 * response recommending a map reload was received while an
1307*4882a593Smuzhiyun 	 * HPB_READ_BUFFER is still in flight. In that case, after the
1308*4882a593Smuzhiyun 	 * current HPB_READ_BUFFER finishes, the next HPB_READ_BUFFER is
1309*4882a593Smuzhiyun 	 * issued again to obtain the latest map data.
1310*4882a593Smuzhiyun 	 */
1311*4882a593Smuzhiyun 	if (srgn->srgn_state == HPB_SRGN_ISSUED)
1312*4882a593Smuzhiyun 		goto unlock_out;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	srgn->srgn_state = HPB_SRGN_ISSUED;
1315*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (alloc_required) {
1318*4882a593Smuzhiyun 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1319*4882a593Smuzhiyun 		if (!srgn->mctx) {
1320*4882a593Smuzhiyun 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1321*4882a593Smuzhiyun 			    "get map_ctx failed. region %d - %d\n",
1322*4882a593Smuzhiyun 			    rgn->rgn_idx, srgn->srgn_idx);
1323*4882a593Smuzhiyun 			state = HPB_SRGN_UNUSED;
1324*4882a593Smuzhiyun 			goto change_srgn_state;
1325*4882a593Smuzhiyun 		}
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	map_req = ufshpb_get_map_req(hpb, srgn);
1329*4882a593Smuzhiyun 	if (!map_req)
1330*4882a593Smuzhiyun 		goto change_srgn_state;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	if (ret) {
1335*4882a593Smuzhiyun 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1336*4882a593Smuzhiyun 			   "%s: issue map_req failed: %d, region %d - %d\n",
1337*4882a593Smuzhiyun 			   __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1338*4882a593Smuzhiyun 		goto free_map_req;
1339*4882a593Smuzhiyun 	}
1340*4882a593Smuzhiyun 	return 0;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun free_map_req:
1343*4882a593Smuzhiyun 	ufshpb_put_map_req(hpb, map_req);
1344*4882a593Smuzhiyun change_srgn_state:
1345*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1346*4882a593Smuzhiyun 	srgn->srgn_state = state;
1347*4882a593Smuzhiyun unlock_out:
1348*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1349*4882a593Smuzhiyun 	return err;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun 
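/*
 * Subregion states as driven by ufshpb_issue_map_req() above (a sketch
 * reconstructed from this file, not from the HPB specification):
 *
 *	HPB_SRGN_UNUSED  -- mctx allocated --> HPB_SRGN_ISSUED
 *	HPB_SRGN_INVALID --------------------> HPB_SRGN_ISSUED
 *	HPB_SRGN_ISSUED  -- map data read  --> HPB_SRGN_VALID
 *
 * (the last transition happens when the map request completes,
 * elsewhere in this file). On failure the state falls back to
 * HPB_SRGN_UNUSED if the mctx allocation failed, or HPB_SRGN_INVALID if
 * the map request could not be issued, so the subregion is retried on
 * the next activation.
 */
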
ufshpb_add_region(struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1352*4882a593Smuzhiyun static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1353*4882a593Smuzhiyun {
1354*4882a593Smuzhiyun 	struct ufshpb_region *victim_rgn = NULL;
1355*4882a593Smuzhiyun 	struct victim_select_info *lru_info = &hpb->lru_info;
1356*4882a593Smuzhiyun 	unsigned long flags;
1357*4882a593Smuzhiyun 	int ret = 0;
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1360*4882a593Smuzhiyun 	/*
1361*4882a593Smuzhiyun 	 * If the region is already on the lru_list, just move it to the
1362*4882a593Smuzhiyun 	 * MRU end of the list, because the region is already in the
1363*4882a593Smuzhiyun 	 * active state.
1364*4882a593Smuzhiyun 	 */
1365*4882a593Smuzhiyun 	if (!list_empty(&rgn->list_lru_rgn)) {
1366*4882a593Smuzhiyun 		ufshpb_hit_lru_info(lru_info, rgn);
1367*4882a593Smuzhiyun 		goto out;
1368*4882a593Smuzhiyun 	}
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1371*4882a593Smuzhiyun 		if (atomic_read(&lru_info->active_cnt) ==
1372*4882a593Smuzhiyun 		    lru_info->max_lru_active_cnt) {
1373*4882a593Smuzhiyun 			/*
1374*4882a593Smuzhiyun 			 * If the maximum number of active regions is
1375*4882a593Smuzhiyun 			 * reached, evict the least recently used region.
1376*4882a593Smuzhiyun 			 * This can happen when the device responds to
1377*4882a593Smuzhiyun 			 * the eviction information late. It is safe to
1378*4882a593Smuzhiyun 			 * evict the least recently used region, because
1379*4882a593Smuzhiyun 			 * the device can detect the eviction by the
1380*4882a593Smuzhiyun 			 * absence of HPB_READ commands for that region.
1381*4882a593Smuzhiyun 			 *
1382*4882a593Smuzhiyun 			 * In host control mode, additionally verify
1383*4882a593Smuzhiyun 			 * that the entering region has enough reads.
1384*4882a593Smuzhiyun 			 */
1385*4882a593Smuzhiyun 			if (hpb->is_hcm &&
1386*4882a593Smuzhiyun 			    rgn->reads < hpb->params.eviction_thld_enter) {
1387*4882a593Smuzhiyun 				ret = -EACCES;
1388*4882a593Smuzhiyun 				goto out;
1389*4882a593Smuzhiyun 			}
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 			victim_rgn = ufshpb_victim_lru_info(hpb);
1392*4882a593Smuzhiyun 			if (!victim_rgn) {
1393*4882a593Smuzhiyun 				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1394*4882a593Smuzhiyun 				    "cannot get victim region %s\n",
1395*4882a593Smuzhiyun 				    hpb->is_hcm ? "" : "error");
1396*4882a593Smuzhiyun 				ret = -ENOMEM;
1397*4882a593Smuzhiyun 				goto out;
1398*4882a593Smuzhiyun 			}
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1401*4882a593Smuzhiyun 				"LRU full (%d), choose victim %d\n",
1402*4882a593Smuzhiyun 				atomic_read(&lru_info->active_cnt),
1403*4882a593Smuzhiyun 				victim_rgn->rgn_idx);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 			if (hpb->is_hcm) {
1406*4882a593Smuzhiyun 				spin_unlock_irqrestore(&hpb->rgn_state_lock,
1407*4882a593Smuzhiyun 						       flags);
1408*4882a593Smuzhiyun 				ret = ufshpb_issue_umap_single_req(hpb,
1409*4882a593Smuzhiyun 								victim_rgn);
1410*4882a593Smuzhiyun 				spin_lock_irqsave(&hpb->rgn_state_lock,
1411*4882a593Smuzhiyun 						  flags);
1412*4882a593Smuzhiyun 				if (ret)
1413*4882a593Smuzhiyun 					goto out;
1414*4882a593Smuzhiyun 			}
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 			__ufshpb_evict_region(hpb, victim_rgn);
1417*4882a593Smuzhiyun 		}
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 		/*
1420*4882a593Smuzhiyun 		 * When a region is added to the lru_info list, all of its
1421*4882a593Smuzhiyun 		 * subregions must have been assigned an mctx. If that
1422*4882a593Smuzhiyun 		 * failed, the mctx is requested again without the region
1423*4882a593Smuzhiyun 		 * being added to the lru_info list.
1424*4882a593Smuzhiyun 		 */
1425*4882a593Smuzhiyun 		ufshpb_add_lru_info(lru_info, rgn);
1426*4882a593Smuzhiyun 	}
1427*4882a593Smuzhiyun out:
1428*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1429*4882a593Smuzhiyun 	return ret;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun 
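/*
 * Putting the pieces above together for host control mode (thresholds
 * are the defaults from ufshpb_hcm_param_init(); illustrative only):
 * when the LRU is full, a region may only enter if rgn->reads >=
 * eviction_thld_enter (256), and only a victim with reads <=
 * eviction_thld_exit (128) may be unmapped and evicted for it. On
 * success ufshpb_add_lru_info() arms the read timeout and places the
 * region at the MRU end of lru_info->lh_lru_rgn.
 */
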
ufshpb_rsp_req_region_update(struct ufshpb_lu * hpb,struct utp_hpb_rsp * rsp_field)1432*4882a593Smuzhiyun static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1433*4882a593Smuzhiyun 					 struct utp_hpb_rsp *rsp_field)
1434*4882a593Smuzhiyun {
1435*4882a593Smuzhiyun 	struct ufshpb_region *rgn;
1436*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1437*4882a593Smuzhiyun 	int i, rgn_i, srgn_i;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1440*4882a593Smuzhiyun 	/*
1441*4882a593Smuzhiyun 	 * If the same region is reported as both active and inactive, the
1442*4882a593Smuzhiyun 	 * region is inactivated. The device can detect this (the region
1443*4882a593Smuzhiyun 	 * stays inactive) and will respond with the proper active region
1444*4882a593Smuzhiyun 	 * information.
1445*4882a593Smuzhiyun 	 */
1446*4882a593Smuzhiyun 	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1447*4882a593Smuzhiyun 		rgn_i =
1448*4882a593Smuzhiyun 			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1449*4882a593Smuzhiyun 		srgn_i =
1450*4882a593Smuzhiyun 			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 		rgn = hpb->rgn_tbl + rgn_i;
1453*4882a593Smuzhiyun 		if (hpb->is_hcm &&
1454*4882a593Smuzhiyun 		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1455*4882a593Smuzhiyun 			/*
1456*4882a593Smuzhiyun 			 * In host control mode, subregion activation
1457*4882a593Smuzhiyun 			 * recommendations are only honoured for active
1458*4882a593Smuzhiyun 			 * regions. Also ignore recommendations for dirty
1459*4882a593Smuzhiyun 			 * regions - the host decides about those on its own.
1460*4882a593Smuzhiyun 			 */
1461*4882a593Smuzhiyun 			continue;
1462*4882a593Smuzhiyun 		}
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1465*4882a593Smuzhiyun 			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 		spin_lock(&hpb->rsp_list_lock);
1468*4882a593Smuzhiyun 		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1469*4882a593Smuzhiyun 		spin_unlock(&hpb->rsp_list_lock);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 		srgn = rgn->srgn_tbl + srgn_i;
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 		/* blocking HPB_READ */
1474*4882a593Smuzhiyun 		spin_lock(&hpb->rgn_state_lock);
1475*4882a593Smuzhiyun 		if (srgn->srgn_state == HPB_SRGN_VALID)
1476*4882a593Smuzhiyun 			srgn->srgn_state = HPB_SRGN_INVALID;
1477*4882a593Smuzhiyun 		spin_unlock(&hpb->rgn_state_lock);
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	if (hpb->is_hcm) {
1481*4882a593Smuzhiyun 		/*
1482*4882a593Smuzhiyun 		 * in host control mode the device is not allowed to inactivate
1483*4882a593Smuzhiyun 		 * regions
1484*4882a593Smuzhiyun 		 */
1485*4882a593Smuzhiyun 		goto out;
1486*4882a593Smuzhiyun 	}
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1489*4882a593Smuzhiyun 		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1490*4882a593Smuzhiyun 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1491*4882a593Smuzhiyun 			"inactivate(%d) region %d\n", i, rgn_i);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		spin_lock(&hpb->rsp_list_lock);
1494*4882a593Smuzhiyun 		ufshpb_update_inactive_info(hpb, rgn_i);
1495*4882a593Smuzhiyun 		spin_unlock(&hpb->rsp_list_lock);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 		rgn = hpb->rgn_tbl + rgn_i;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 		spin_lock(&hpb->rgn_state_lock);
1500*4882a593Smuzhiyun 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1501*4882a593Smuzhiyun 			for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1502*4882a593Smuzhiyun 				srgn = rgn->srgn_tbl + srgn_i;
1503*4882a593Smuzhiyun 				if (srgn->srgn_state == HPB_SRGN_VALID)
1504*4882a593Smuzhiyun 					srgn->srgn_state = HPB_SRGN_INVALID;
1505*4882a593Smuzhiyun 			}
1506*4882a593Smuzhiyun 		}
1507*4882a593Smuzhiyun 		spin_unlock(&hpb->rgn_state_lock);
1508*4882a593Smuzhiyun 	}
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun out:
1511*4882a593Smuzhiyun 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1512*4882a593Smuzhiyun 		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	if (ufshpb_get_state(hpb) == HPB_PRESENT)
1515*4882a593Smuzhiyun 		queue_work(ufshpb_wq, &hpb->map_work);
1516*4882a593Smuzhiyun }
1517*4882a593Smuzhiyun 
ufshpb_dev_reset_handler(struct ufshpb_lu * hpb)1518*4882a593Smuzhiyun static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1519*4882a593Smuzhiyun {
1520*4882a593Smuzhiyun 	struct victim_select_info *lru_info = &hpb->lru_info;
1521*4882a593Smuzhiyun 	struct ufshpb_region *rgn;
1522*4882a593Smuzhiyun 	unsigned long flags;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1527*4882a593Smuzhiyun 		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun /*
1534*4882a593Smuzhiyun  * This function parses the recommended active subregion information in
1535*4882a593Smuzhiyun  * the sense data field of a response UPIU with SAM_STAT_GOOD status.
1536*4882a593Smuzhiyun  */
ufshpb_rsp_upiu(struct ufs_hba * hba,struct ufshcd_lrb * lrbp)1537*4882a593Smuzhiyun void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1540*4882a593Smuzhiyun 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1541*4882a593Smuzhiyun 	int data_seg_len;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	if (unlikely(lrbp->lun != rsp_field->lun)) {
1544*4882a593Smuzhiyun 		struct scsi_device *sdev;
1545*4882a593Smuzhiyun 		bool found = false;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 		__shost_for_each_device(sdev, hba->host) {
1548*4882a593Smuzhiyun 			hpb = ufshpb_get_hpb_data(sdev);
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 			if (!hpb)
1551*4882a593Smuzhiyun 				continue;
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 			if (rsp_field->lun == hpb->lun) {
1554*4882a593Smuzhiyun 				found = true;
1555*4882a593Smuzhiyun 				break;
1556*4882a593Smuzhiyun 			}
1557*4882a593Smuzhiyun 		}
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 		if (!found)
1560*4882a593Smuzhiyun 			return;
1561*4882a593Smuzhiyun 	}
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	if (!hpb)
1564*4882a593Smuzhiyun 		return;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	if (ufshpb_get_state(hpb) == HPB_INIT)
1567*4882a593Smuzhiyun 		return;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1570*4882a593Smuzhiyun 	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1571*4882a593Smuzhiyun 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1572*4882a593Smuzhiyun 			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
1573*4882a593Smuzhiyun 			   __func__);
1574*4882a593Smuzhiyun 		return;
1575*4882a593Smuzhiyun 	}
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1578*4882a593Smuzhiyun 		& MASK_RSP_UPIU_DATA_SEG_LEN;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	/* To flush the remaining rsp_list, queue the map_work task */
1581*4882a593Smuzhiyun 	if (!data_seg_len) {
1582*4882a593Smuzhiyun 		if (!ufshpb_is_general_lun(hpb->lun))
1583*4882a593Smuzhiyun 			return;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 		ufshpb_kick_map_work(hpb);
1586*4882a593Smuzhiyun 		return;
1587*4882a593Smuzhiyun 	}
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1592*4882a593Smuzhiyun 		return;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	hpb->stats.rb_noti_cnt++;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	switch (rsp_field->hpb_op) {
1597*4882a593Smuzhiyun 	case HPB_RSP_REQ_REGION_UPDATE:
1598*4882a593Smuzhiyun 		if (data_seg_len != DEV_DATA_SEG_LEN)
1599*4882a593Smuzhiyun 			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1600*4882a593Smuzhiyun 				 "%s: data segment length mismatch.\n",
1601*4882a593Smuzhiyun 				 __func__);
1602*4882a593Smuzhiyun 		ufshpb_rsp_req_region_update(hpb, rsp_field);
1603*4882a593Smuzhiyun 		break;
1604*4882a593Smuzhiyun 	case HPB_RSP_DEV_RESET:
1605*4882a593Smuzhiyun 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1606*4882a593Smuzhiyun 			 "UFS device lost HPB information during PM.\n");
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 		if (hpb->is_hcm) {
1609*4882a593Smuzhiyun 			struct scsi_device *sdev;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 			__shost_for_each_device(sdev, hba->host) {
1612*4882a593Smuzhiyun 				struct ufshpb_lu *h = sdev->hostdata;
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 				if (h)
1615*4882a593Smuzhiyun 					ufshpb_dev_reset_handler(h);
1616*4882a593Smuzhiyun 			}
1617*4882a593Smuzhiyun 		}
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 		break;
1620*4882a593Smuzhiyun 	default:
1621*4882a593Smuzhiyun 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1622*4882a593Smuzhiyun 			   "hpb_op is not available: %d\n",
1623*4882a593Smuzhiyun 			   rsp_field->hpb_op);
1624*4882a593Smuzhiyun 		break;
1625*4882a593Smuzhiyun 	}
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun 
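/*
 * Example of the data segment guard in ufshpb_rsp_upiu() above: a
 * response UPIU whose header reports a zero data segment length carries
 * no HPB sense data, so the only action taken is kicking map_work on
 * general LUNs to drain a possibly non-empty rsp_list.
 */
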
ufshpb_add_active_list(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,struct ufshpb_subregion * srgn)1628*4882a593Smuzhiyun static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1629*4882a593Smuzhiyun 				   struct ufshpb_region *rgn,
1630*4882a593Smuzhiyun 				   struct ufshpb_subregion *srgn)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	if (!list_empty(&rgn->list_inact_rgn))
1633*4882a593Smuzhiyun 		return;
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	if (!list_empty(&srgn->list_act_srgn)) {
1636*4882a593Smuzhiyun 		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1637*4882a593Smuzhiyun 		return;
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1641*4882a593Smuzhiyun }
1642*4882a593Smuzhiyun 
ufshpb_add_pending_evict_list(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,struct list_head * pending_list)1643*4882a593Smuzhiyun static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1644*4882a593Smuzhiyun 					  struct ufshpb_region *rgn,
1645*4882a593Smuzhiyun 					  struct list_head *pending_list)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1648*4882a593Smuzhiyun 	int srgn_idx;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	if (!list_empty(&rgn->list_inact_rgn))
1651*4882a593Smuzhiyun 		return;
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	for_each_sub_region(rgn, srgn_idx, srgn)
1654*4882a593Smuzhiyun 		if (!list_empty(&srgn->list_act_srgn))
1655*4882a593Smuzhiyun 			return;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	list_add_tail(&rgn->list_inact_rgn, pending_list);
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun 
ufshpb_run_active_subregion_list(struct ufshpb_lu * hpb)1660*4882a593Smuzhiyun static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 	struct ufshpb_region *rgn;
1663*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1664*4882a593Smuzhiyun 	unsigned long flags;
1665*4882a593Smuzhiyun 	int ret = 0;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1668*4882a593Smuzhiyun 	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1669*4882a593Smuzhiyun 						struct ufshpb_subregion,
1670*4882a593Smuzhiyun 						list_act_srgn))) {
1671*4882a593Smuzhiyun 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1672*4882a593Smuzhiyun 			break;
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun 		list_del_init(&srgn->list_act_srgn);
1675*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 		rgn = hpb->rgn_tbl + srgn->rgn_idx;
1678*4882a593Smuzhiyun 		ret = ufshpb_add_region(hpb, rgn);
1679*4882a593Smuzhiyun 		if (ret)
1680*4882a593Smuzhiyun 			goto active_failed;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1683*4882a593Smuzhiyun 		if (ret) {
1684*4882a593Smuzhiyun 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1685*4882a593Smuzhiyun 			    "issue map_req failed. ret %d, region %d - %d\n",
1686*4882a593Smuzhiyun 			    ret, rgn->rgn_idx, srgn->srgn_idx);
1687*4882a593Smuzhiyun 			goto active_failed;
1688*4882a593Smuzhiyun 		}
1689*4882a593Smuzhiyun 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1690*4882a593Smuzhiyun 	}
1691*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1692*4882a593Smuzhiyun 	return;
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun active_failed:
1695*4882a593Smuzhiyun 	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1696*4882a593Smuzhiyun 		   rgn->rgn_idx, srgn->srgn_idx);
1697*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1698*4882a593Smuzhiyun 	ufshpb_add_active_list(hpb, rgn, srgn);
1699*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun 
ufshpb_run_inactive_region_list(struct ufshpb_lu * hpb)1702*4882a593Smuzhiyun static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1703*4882a593Smuzhiyun {
1704*4882a593Smuzhiyun 	struct ufshpb_region *rgn;
1705*4882a593Smuzhiyun 	unsigned long flags;
1706*4882a593Smuzhiyun 	int ret;
1707*4882a593Smuzhiyun 	LIST_HEAD(pending_list);
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1710*4882a593Smuzhiyun 	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1711*4882a593Smuzhiyun 					       struct ufshpb_region,
1712*4882a593Smuzhiyun 					       list_inact_rgn))) {
1713*4882a593Smuzhiyun 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1714*4882a593Smuzhiyun 			break;
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 		list_del_init(&rgn->list_inact_rgn);
1717*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 		ret = ufshpb_evict_region(hpb, rgn);
1720*4882a593Smuzhiyun 		if (ret) {
1721*4882a593Smuzhiyun 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1722*4882a593Smuzhiyun 			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1723*4882a593Smuzhiyun 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1724*4882a593Smuzhiyun 		}
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1727*4882a593Smuzhiyun 	}
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	list_splice(&pending_list, &hpb->lh_inact_rgn);
1730*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1731*4882a593Smuzhiyun }
1732*4882a593Smuzhiyun 
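/*
 * Both list walkers above follow the same pattern: pop one entry while
 * holding rsp_list_lock, drop the lock to issue the (potentially
 * sleeping) map or unmap requests, then retake it for the next entry.
 * Evictions that fail are parked on the local pending_list and spliced
 * back onto lh_inact_rgn so they are retried on the next map_work run.
 */
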
ufshpb_normalization_work_handler(struct work_struct * work)1733*4882a593Smuzhiyun static void ufshpb_normalization_work_handler(struct work_struct *work)
1734*4882a593Smuzhiyun {
1735*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1736*4882a593Smuzhiyun 					     ufshpb_normalization_work);
1737*4882a593Smuzhiyun 	int rgn_idx;
1738*4882a593Smuzhiyun 	u8 factor = hpb->params.normalization_factor;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1741*4882a593Smuzhiyun 		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1742*4882a593Smuzhiyun 		int srgn_idx;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 		spin_lock(&rgn->rgn_lock);
1745*4882a593Smuzhiyun 		rgn->reads = 0;
1746*4882a593Smuzhiyun 		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1747*4882a593Smuzhiyun 			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 			srgn->reads >>= factor;
1750*4882a593Smuzhiyun 			rgn->reads += srgn->reads;
1751*4882a593Smuzhiyun 		}
1752*4882a593Smuzhiyun 		spin_unlock(&rgn->rgn_lock);
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1755*4882a593Smuzhiyun 			continue;
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 		/* if region is active but has no reads - inactivate it */
1758*4882a593Smuzhiyun 		spin_lock(&hpb->rsp_list_lock);
1759*4882a593Smuzhiyun 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1760*4882a593Smuzhiyun 		spin_unlock(&hpb->rsp_list_lock);
1761*4882a593Smuzhiyun 	}
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun 
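/*
 * Normalization example (normalization_factor = 1, the value set in
 * ufshpb_hcm_param_init()): each pass halves every subregion counter,
 * so srgn->reads of (9, 4, 3) in one region decay to (4, 2, 1) and
 * rgn->reads becomes 7. An HPB_RGN_ACTIVE region whose counters have
 * all decayed to zero is queued for inactivation.
 */
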
ufshpb_map_work_handler(struct work_struct * work)1764*4882a593Smuzhiyun static void ufshpb_map_work_handler(struct work_struct *work)
1765*4882a593Smuzhiyun {
1766*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1769*4882a593Smuzhiyun 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1770*4882a593Smuzhiyun 			   "%s: ufshpb state is not PRESENT\n", __func__);
1771*4882a593Smuzhiyun 		return;
1772*4882a593Smuzhiyun 	}
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	ufshpb_run_inactive_region_list(hpb);
1775*4882a593Smuzhiyun 	ufshpb_run_active_subregion_list(hpb);
1776*4882a593Smuzhiyun }
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun /*
1779*4882a593Smuzhiyun  * This function does not need to hold any locks (rgn_state_lock,
1780*4882a593Smuzhiyun  * rsp_list_lock, etc.) because it is only called during initialization.
1781*4882a593Smuzhiyun  */
ufshpb_init_pinned_active_region(struct ufs_hba * hba,struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1782*4882a593Smuzhiyun static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1783*4882a593Smuzhiyun 					    struct ufshpb_lu *hpb,
1784*4882a593Smuzhiyun 					    struct ufshpb_region *rgn)
1785*4882a593Smuzhiyun {
1786*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1787*4882a593Smuzhiyun 	int srgn_idx, i;
1788*4882a593Smuzhiyun 	int err = 0;
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	for_each_sub_region(rgn, srgn_idx, srgn) {
1791*4882a593Smuzhiyun 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1792*4882a593Smuzhiyun 		srgn->srgn_state = HPB_SRGN_INVALID;
1793*4882a593Smuzhiyun 		if (!srgn->mctx) {
1794*4882a593Smuzhiyun 			err = -ENOMEM;
1795*4882a593Smuzhiyun 			dev_err(hba->dev,
1796*4882a593Smuzhiyun 				"alloc mctx for pinned region failed\n");
1797*4882a593Smuzhiyun 			goto release;
1798*4882a593Smuzhiyun 		}
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1801*4882a593Smuzhiyun 	}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	rgn->rgn_state = HPB_RGN_PINNED;
1804*4882a593Smuzhiyun 	return 0;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun release:
1807*4882a593Smuzhiyun 	for (i = 0; i < srgn_idx; i++) {
1808*4882a593Smuzhiyun 		srgn = rgn->srgn_tbl + i;
1809*4882a593Smuzhiyun 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1810*4882a593Smuzhiyun 	}
1811*4882a593Smuzhiyun 	return err;
1812*4882a593Smuzhiyun }
1813*4882a593Smuzhiyun 
ufshpb_init_subregion_tbl(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,bool last)1814*4882a593Smuzhiyun static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1815*4882a593Smuzhiyun 				      struct ufshpb_region *rgn, bool last)
1816*4882a593Smuzhiyun {
1817*4882a593Smuzhiyun 	int srgn_idx;
1818*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	for_each_sub_region(rgn, srgn_idx, srgn) {
1821*4882a593Smuzhiyun 		INIT_LIST_HEAD(&srgn->list_act_srgn);
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 		srgn->rgn_idx = rgn->rgn_idx;
1824*4882a593Smuzhiyun 		srgn->srgn_idx = srgn_idx;
1825*4882a593Smuzhiyun 		srgn->srgn_state = HPB_SRGN_UNUSED;
1826*4882a593Smuzhiyun 	}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	if (unlikely(last && hpb->last_srgn_entries))
1829*4882a593Smuzhiyun 		srgn->is_last = true;
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun 
ufshpb_alloc_subregion_tbl(struct ufshpb_lu * hpb,struct ufshpb_region * rgn,int srgn_cnt)1832*4882a593Smuzhiyun static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1833*4882a593Smuzhiyun 				      struct ufshpb_region *rgn, int srgn_cnt)
1834*4882a593Smuzhiyun {
1835*4882a593Smuzhiyun 	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1836*4882a593Smuzhiyun 				 GFP_KERNEL);
1837*4882a593Smuzhiyun 	if (!rgn->srgn_tbl)
1838*4882a593Smuzhiyun 		return -ENOMEM;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	rgn->srgn_cnt = srgn_cnt;
1841*4882a593Smuzhiyun 	return 0;
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun 
ufshpb_lu_parameter_init(struct ufs_hba * hba,struct ufshpb_lu * hpb,struct ufshpb_dev_info * hpb_dev_info,struct ufshpb_lu_info * hpb_lu_info)1844*4882a593Smuzhiyun static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1845*4882a593Smuzhiyun 				     struct ufshpb_lu *hpb,
1846*4882a593Smuzhiyun 				     struct ufshpb_dev_info *hpb_dev_info,
1847*4882a593Smuzhiyun 				     struct ufshpb_lu_info *hpb_lu_info)
1848*4882a593Smuzhiyun {
1849*4882a593Smuzhiyun 	u32 entries_per_rgn;
1850*4882a593Smuzhiyun 	u64 rgn_mem_size, tmp;
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/* for pre_req */
1853*4882a593Smuzhiyun 	hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	if (ufshpb_is_legacy(hba))
1856*4882a593Smuzhiyun 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1857*4882a593Smuzhiyun 	else
1858*4882a593Smuzhiyun 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
1859*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	hpb->cur_read_id = 0;
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1864*4882a593Smuzhiyun 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1865*4882a593Smuzhiyun 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1866*4882a593Smuzhiyun 		: PINNED_NOT_SET;
1867*4882a593Smuzhiyun 	hpb->lru_info.max_lru_active_cnt =
1868*4882a593Smuzhiyun 		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1871*4882a593Smuzhiyun 			* HPB_ENTRY_SIZE;
1872*4882a593Smuzhiyun 	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1873*4882a593Smuzhiyun 	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1874*4882a593Smuzhiyun 		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	tmp = rgn_mem_size;
1877*4882a593Smuzhiyun 	do_div(tmp, HPB_ENTRY_SIZE);
1878*4882a593Smuzhiyun 	entries_per_rgn = (u32)tmp;
1879*4882a593Smuzhiyun 	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1880*4882a593Smuzhiyun 	hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1883*4882a593Smuzhiyun 	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1884*4882a593Smuzhiyun 	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	tmp = rgn_mem_size;
1887*4882a593Smuzhiyun 	do_div(tmp, hpb->srgn_mem_size);
1888*4882a593Smuzhiyun 	hpb->srgns_per_rgn = (int)tmp;
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1891*4882a593Smuzhiyun 				entries_per_rgn);
1892*4882a593Smuzhiyun 	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1893*4882a593Smuzhiyun 				(hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1894*4882a593Smuzhiyun 	hpb->last_srgn_entries = hpb_lu_info->num_blocks
1895*4882a593Smuzhiyun 				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1900*4882a593Smuzhiyun 		hpb->is_hcm = true;
1901*4882a593Smuzhiyun }
1902*4882a593Smuzhiyun 
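/*
 * Geometry example for ufshpb_lu_parameter_init() above, assuming each
 * HPB entry is 8 bytes and maps one 4 KB block (the ratio implied by
 * the "2MB caches 1GB" note at the top of this file): a 16 MB region
 * then holds 4096 L2P entries, so rgn_mem_size = 32 KB; with 4 KB of
 * map data per subregion, entries_per_srgn = 512 (covering 2 MB of LBA
 * space) and srgns_per_rgn = 8.
 */
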
ufshpb_alloc_region_tbl(struct ufs_hba * hba,struct ufshpb_lu * hpb)1903*4882a593Smuzhiyun static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1904*4882a593Smuzhiyun {
1905*4882a593Smuzhiyun 	struct ufshpb_region *rgn_table, *rgn;
1906*4882a593Smuzhiyun 	int rgn_idx, i;
1907*4882a593Smuzhiyun 	int ret = 0;
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1910*4882a593Smuzhiyun 			    GFP_KERNEL);
1911*4882a593Smuzhiyun 	if (!rgn_table)
1912*4882a593Smuzhiyun 		return -ENOMEM;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	hpb->rgn_tbl = rgn_table;
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1917*4882a593Smuzhiyun 		int srgn_cnt = hpb->srgns_per_rgn;
1918*4882a593Smuzhiyun 		bool last_srgn = false;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 		rgn = rgn_table + rgn_idx;
1921*4882a593Smuzhiyun 		rgn->rgn_idx = rgn_idx;
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 		spin_lock_init(&rgn->rgn_lock);
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
1926*4882a593Smuzhiyun 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
1927*4882a593Smuzhiyun 		INIT_LIST_HEAD(&rgn->list_expired_rgn);
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 		if (rgn_idx == hpb->rgns_per_lu - 1) {
1930*4882a593Smuzhiyun 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
1931*4882a593Smuzhiyun 				    hpb->srgns_per_rgn) + 1;
1932*4882a593Smuzhiyun 			last_srgn = true;
1933*4882a593Smuzhiyun 		}
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1936*4882a593Smuzhiyun 		if (ret)
1937*4882a593Smuzhiyun 			goto release_srgn_table;
1938*4882a593Smuzhiyun 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1941*4882a593Smuzhiyun 			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1942*4882a593Smuzhiyun 			if (ret)
1943*4882a593Smuzhiyun 				goto release_srgn_table;
1944*4882a593Smuzhiyun 		} else {
1945*4882a593Smuzhiyun 			rgn->rgn_state = HPB_RGN_INACTIVE;
1946*4882a593Smuzhiyun 		}
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 		rgn->rgn_flags = 0;
1949*4882a593Smuzhiyun 		rgn->hpb = hpb;
1950*4882a593Smuzhiyun 	}
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	return 0;
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun release_srgn_table:
1955*4882a593Smuzhiyun 	for (i = 0; i < rgn_idx; i++) {
1956*4882a593Smuzhiyun 		rgn = rgn_table + i;
1957*4882a593Smuzhiyun 		kvfree(rgn->srgn_tbl);
1958*4882a593Smuzhiyun 	}
1959*4882a593Smuzhiyun 	kvfree(rgn_table);
1960*4882a593Smuzhiyun 	return ret;
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun 
ufshpb_destroy_subregion_tbl(struct ufshpb_lu * hpb,struct ufshpb_region * rgn)1963*4882a593Smuzhiyun static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1964*4882a593Smuzhiyun 					 struct ufshpb_region *rgn)
1965*4882a593Smuzhiyun {
1966*4882a593Smuzhiyun 	int srgn_idx;
1967*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn;
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	for_each_sub_region(rgn, srgn_idx, srgn)
1970*4882a593Smuzhiyun 		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1971*4882a593Smuzhiyun 			srgn->srgn_state = HPB_SRGN_UNUSED;
1972*4882a593Smuzhiyun 			ufshpb_put_map_ctx(hpb, srgn->mctx);
1973*4882a593Smuzhiyun 		}
1974*4882a593Smuzhiyun }
1975*4882a593Smuzhiyun 
ufshpb_destroy_region_tbl(struct ufshpb_lu * hpb)1976*4882a593Smuzhiyun static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun 	int rgn_idx;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1981*4882a593Smuzhiyun 		struct ufshpb_region *rgn;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 		rgn = hpb->rgn_tbl + rgn_idx;
1984*4882a593Smuzhiyun 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1985*4882a593Smuzhiyun 			rgn->rgn_state = HPB_RGN_INACTIVE;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 			ufshpb_destroy_subregion_tbl(hpb, rgn);
1988*4882a593Smuzhiyun 		}
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 		kvfree(rgn->srgn_tbl);
1991*4882a593Smuzhiyun 	}
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	kvfree(hpb->rgn_tbl);
1994*4882a593Smuzhiyun }
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun /* SYSFS functions */
1997*4882a593Smuzhiyun #define ufshpb_sysfs_attr_show_func(__name)				\
1998*4882a593Smuzhiyun static ssize_t __name##_show(struct device *dev,			\
1999*4882a593Smuzhiyun 	struct device_attribute *attr, char *buf)			\
2000*4882a593Smuzhiyun {									\
2001*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);			\
2002*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
2003*4882a593Smuzhiyun 									\
2004*4882a593Smuzhiyun 	if (!hpb)							\
2005*4882a593Smuzhiyun 		return -ENODEV;						\
2006*4882a593Smuzhiyun 									\
2007*4882a593Smuzhiyun 	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
2008*4882a593Smuzhiyun }									\
2009*4882a593Smuzhiyun \
2010*4882a593Smuzhiyun static DEVICE_ATTR_RO(__name)
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(hit_cnt);
2013*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(miss_cnt);
2014*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(rb_noti_cnt);
2015*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(rb_active_cnt);
2016*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
2017*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(map_req_cnt);
2018*4882a593Smuzhiyun ufshpb_sysfs_attr_show_func(umap_req_cnt);
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun static struct attribute *hpb_dev_stat_attrs[] = {
2021*4882a593Smuzhiyun 	&dev_attr_hit_cnt.attr,
2022*4882a593Smuzhiyun 	&dev_attr_miss_cnt.attr,
2023*4882a593Smuzhiyun 	&dev_attr_rb_noti_cnt.attr,
2024*4882a593Smuzhiyun 	&dev_attr_rb_active_cnt.attr,
2025*4882a593Smuzhiyun 	&dev_attr_rb_inactive_cnt.attr,
2026*4882a593Smuzhiyun 	&dev_attr_map_req_cnt.attr,
2027*4882a593Smuzhiyun 	&dev_attr_umap_req_cnt.attr,
2028*4882a593Smuzhiyun 	NULL,
2029*4882a593Smuzhiyun };
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun struct attribute_group ufs_sysfs_hpb_stat_group = {
2032*4882a593Smuzhiyun 	.name = "hpb_stats",
2033*4882a593Smuzhiyun 	.attrs = hpb_dev_stat_attrs,
2034*4882a593Smuzhiyun };
2035*4882a593Smuzhiyun 
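/*
 * The counters above are exposed read-only under the scsi_device sysfs
 * directory in the "hpb_stats" group, each printed as a single u64.
 * Illustrative usage (the exact H:C:T:L address depends on the system):
 *
 *	cat /sys/class/scsi_device/0:0:0:0/device/hpb_stats/hit_cnt
 */
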
2036*4882a593Smuzhiyun /* SYSFS functions for HPB parameters */
2037*4882a593Smuzhiyun #define ufshpb_sysfs_param_show_func(__name)				\
2038*4882a593Smuzhiyun static ssize_t __name##_show(struct device *dev,			\
2039*4882a593Smuzhiyun 	struct device_attribute *attr, char *buf)			\
2040*4882a593Smuzhiyun {									\
2041*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);			\
2042*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
2043*4882a593Smuzhiyun 									\
2044*4882a593Smuzhiyun 	if (!hpb)							\
2045*4882a593Smuzhiyun 		return -ENODEV;						\
2046*4882a593Smuzhiyun 									\
2047*4882a593Smuzhiyun 	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
2048*4882a593Smuzhiyun }
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(requeue_timeout_ms);
2051*4882a593Smuzhiyun static ssize_t
requeue_timeout_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2052*4882a593Smuzhiyun requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2053*4882a593Smuzhiyun 			 const char *buf, size_t count)
2054*4882a593Smuzhiyun {
2055*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2056*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2057*4882a593Smuzhiyun 	int val;
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	if (!hpb)
2060*4882a593Smuzhiyun 		return -ENODEV;
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2063*4882a593Smuzhiyun 		return -EINVAL;
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	if (val < 0)
2066*4882a593Smuzhiyun 		return -EINVAL;
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	hpb->params.requeue_timeout_ms = val;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	return count;
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun static DEVICE_ATTR_RW(requeue_timeout_ms);
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(activation_thld);
2075*4882a593Smuzhiyun static ssize_t
activation_thld_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2076*4882a593Smuzhiyun activation_thld_store(struct device *dev, struct device_attribute *attr,
2077*4882a593Smuzhiyun 		      const char *buf, size_t count)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2080*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2081*4882a593Smuzhiyun 	int val;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	if (!hpb)
2084*4882a593Smuzhiyun 		return -ENODEV;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2087*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2090*4882a593Smuzhiyun 		return -EINVAL;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	if (val <= 0)
2093*4882a593Smuzhiyun 		return -EINVAL;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	hpb->params.activation_thld = val;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	return count;
2098*4882a593Smuzhiyun }
2099*4882a593Smuzhiyun static DEVICE_ATTR_RW(activation_thld);
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(normalization_factor);
2102*4882a593Smuzhiyun static ssize_t
normalization_factor_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2103*4882a593Smuzhiyun normalization_factor_store(struct device *dev, struct device_attribute *attr,
2104*4882a593Smuzhiyun 			   const char *buf, size_t count)
2105*4882a593Smuzhiyun {
2106*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2107*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2108*4882a593Smuzhiyun 	int val;
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	if (!hpb)
2111*4882a593Smuzhiyun 		return -ENODEV;
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2114*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2117*4882a593Smuzhiyun 		return -EINVAL;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
2120*4882a593Smuzhiyun 		return -EINVAL;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	hpb->params.normalization_factor = val;
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	return count;
2125*4882a593Smuzhiyun }
2126*4882a593Smuzhiyun static DEVICE_ATTR_RW(normalization_factor);
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(eviction_thld_enter);
2129*4882a593Smuzhiyun static ssize_t
eviction_thld_enter_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2130*4882a593Smuzhiyun eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
2131*4882a593Smuzhiyun 			  const char *buf, size_t count)
2132*4882a593Smuzhiyun {
2133*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2134*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2135*4882a593Smuzhiyun 	int val;
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 	if (!hpb)
2138*4882a593Smuzhiyun 		return -ENODEV;
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2141*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2144*4882a593Smuzhiyun 		return -EINVAL;
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	if (val <= hpb->params.eviction_thld_exit)
2147*4882a593Smuzhiyun 		return -EINVAL;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	hpb->params.eviction_thld_enter = val;
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	return count;
2152*4882a593Smuzhiyun }
2153*4882a593Smuzhiyun static DEVICE_ATTR_RW(eviction_thld_enter);
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(eviction_thld_exit);
2156*4882a593Smuzhiyun static ssize_t
eviction_thld_exit_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2157*4882a593Smuzhiyun eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
2158*4882a593Smuzhiyun 			 const char *buf, size_t count)
2159*4882a593Smuzhiyun {
2160*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2161*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2162*4882a593Smuzhiyun 	int val;
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	if (!hpb)
2165*4882a593Smuzhiyun 		return -ENODEV;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2168*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2171*4882a593Smuzhiyun 		return -EINVAL;
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	if (val <= hpb->params.activation_thld)
2174*4882a593Smuzhiyun 		return -EINVAL;
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	hpb->params.eviction_thld_exit = val;
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	return count;
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun static DEVICE_ATTR_RW(eviction_thld_exit);
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(read_timeout_ms);
2183*4882a593Smuzhiyun static ssize_t
read_timeout_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2184*4882a593Smuzhiyun read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2185*4882a593Smuzhiyun 		      const char *buf, size_t count)
2186*4882a593Smuzhiyun {
2187*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2188*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2189*4882a593Smuzhiyun 	int val;
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	if (!hpb)
2192*4882a593Smuzhiyun 		return -ENODEV;
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2195*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2198*4882a593Smuzhiyun 		return -EINVAL;
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	/* read_timeout >> timeout_polling_interval */
2201*4882a593Smuzhiyun 	if (val < hpb->params.timeout_polling_interval_ms * 2)
2202*4882a593Smuzhiyun 		return -EINVAL;
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	hpb->params.read_timeout_ms = val;
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	return count;
2207*4882a593Smuzhiyun }
2208*4882a593Smuzhiyun static DEVICE_ATTR_RW(read_timeout_ms);
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(read_timeout_expiries);
2211*4882a593Smuzhiyun static ssize_t
read_timeout_expiries_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2212*4882a593Smuzhiyun read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
2213*4882a593Smuzhiyun 			    const char *buf, size_t count)
2214*4882a593Smuzhiyun {
2215*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2216*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2217*4882a593Smuzhiyun 	int val;
2218*4882a593Smuzhiyun 
2219*4882a593Smuzhiyun 	if (!hpb)
2220*4882a593Smuzhiyun 		return -ENODEV;
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2223*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2226*4882a593Smuzhiyun 		return -EINVAL;
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 	if (val <= 0)
2229*4882a593Smuzhiyun 		return -EINVAL;
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	hpb->params.read_timeout_expiries = val;
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 	return count;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun static DEVICE_ATTR_RW(read_timeout_expiries);
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
2238*4882a593Smuzhiyun static ssize_t
timeout_polling_interval_ms_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2239*4882a593Smuzhiyun timeout_polling_interval_ms_store(struct device *dev,
2240*4882a593Smuzhiyun 				  struct device_attribute *attr,
2241*4882a593Smuzhiyun 				  const char *buf, size_t count)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2244*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2245*4882a593Smuzhiyun 	int val;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	if (!hpb)
2248*4882a593Smuzhiyun 		return -ENODEV;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2251*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2254*4882a593Smuzhiyun 		return -EINVAL;
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	/* timeout_polling_interval << read_timeout */
2257*4882a593Smuzhiyun 	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2258*4882a593Smuzhiyun 		return -EINVAL;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	hpb->params.timeout_polling_interval_ms = val;
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	return count;
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun ufshpb_sysfs_param_show_func(inflight_map_req);
2267*4882a593Smuzhiyun static ssize_t inflight_map_req_store(struct device *dev,
2268*4882a593Smuzhiyun 				      struct device_attribute *attr,
2269*4882a593Smuzhiyun 				      const char *buf, size_t count)
2270*4882a593Smuzhiyun {
2271*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
2272*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2273*4882a593Smuzhiyun 	int val;
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	if (!hpb)
2276*4882a593Smuzhiyun 		return -ENODEV;
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	if (!hpb->is_hcm)
2279*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	if (kstrtoint(buf, 0, &val))
2282*4882a593Smuzhiyun 		return -EINVAL;
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2285*4882a593Smuzhiyun 		return -EINVAL;
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	hpb->params.inflight_map_req = val;
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 	return count;
2290*4882a593Smuzhiyun }
2291*4882a593Smuzhiyun static DEVICE_ATTR_RW(inflight_map_req);
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 
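/*
 * Default host-control-mode (HCM) parameters. With the defines above these
 * evaluate to: activation after 8 reads, eviction thresholds of 256
 * (8 << 5) to enter and 128 (8 << 4) to exit, a 1000 ms read timeout with
 * up to 100 expiries, a 200 ms timeout-polling interval, and a single
 * in-flight map request.
 */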
2294*4882a593Smuzhiyun static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2295*4882a593Smuzhiyun {
2296*4882a593Smuzhiyun 	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2297*4882a593Smuzhiyun 	hpb->params.normalization_factor = 1;
2298*4882a593Smuzhiyun 	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2299*4882a593Smuzhiyun 	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2300*4882a593Smuzhiyun 	hpb->params.read_timeout_ms = READ_TO_MS;
2301*4882a593Smuzhiyun 	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2302*4882a593Smuzhiyun 	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2303*4882a593Smuzhiyun 	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun static struct attribute *hpb_dev_param_attrs[] = {
2307*4882a593Smuzhiyun 	&dev_attr_requeue_timeout_ms.attr,
2308*4882a593Smuzhiyun 	&dev_attr_activation_thld.attr,
2309*4882a593Smuzhiyun 	&dev_attr_normalization_factor.attr,
2310*4882a593Smuzhiyun 	&dev_attr_eviction_thld_enter.attr,
2311*4882a593Smuzhiyun 	&dev_attr_eviction_thld_exit.attr,
2312*4882a593Smuzhiyun 	&dev_attr_read_timeout_ms.attr,
2313*4882a593Smuzhiyun 	&dev_attr_read_timeout_expiries.attr,
2314*4882a593Smuzhiyun 	&dev_attr_timeout_polling_interval_ms.attr,
2315*4882a593Smuzhiyun 	&dev_attr_inflight_map_req.attr,
2316*4882a593Smuzhiyun 	NULL,
2317*4882a593Smuzhiyun };
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun struct attribute_group ufs_sysfs_hpb_param_group = {
2320*4882a593Smuzhiyun 	.name = "hpb_params",
2321*4882a593Smuzhiyun 	.attrs = hpb_dev_param_attrs,
2322*4882a593Smuzhiyun };
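/*
 * A minimal tuning sketch for the HCM parameters above, assuming a
 * hypothetical HPB logical unit whose scsi_device sysfs directory is
 * /sys/class/scsi_device/0:0:0:0/device. The store handlers reject values
 * that break read_timeout_ms >= 2 * timeout_polling_interval_ms or
 * 0 < inflight_map_req < queue_depth, so enlarge read_timeout_ms before
 * raising the polling interval:
 *
 *   cd /sys/class/scsi_device/0:0:0:0/device/hpb_params
 *   echo 2000 > read_timeout_ms
 *   echo 500 > timeout_polling_interval_ms
 *   echo 2 > inflight_map_req
 *
 * All three writes fail with EOPNOTSUPP unless the LU runs in host control
 * mode (is_hcm).
 */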
2323*4882a593Smuzhiyun 
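/*
 * Pre-allocate the pre_req pool used for HPB pre-request (WRITE BUFFER)
 * commands: one entry for every half of the LU queue depth, each carrying
 * a one-segment bio and a zeroed page for the ppn payload. On any
 * allocation failure everything allocated so far is unwound and -ENOMEM
 * is returned.
 */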
2324*4882a593Smuzhiyun static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2325*4882a593Smuzhiyun {
2326*4882a593Smuzhiyun 	struct ufshpb_req *pre_req = NULL, *t;
2327*4882a593Smuzhiyun 	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2328*4882a593Smuzhiyun 	int i;
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2333*4882a593Smuzhiyun 	hpb->throttle_pre_req = qd;
2334*4882a593Smuzhiyun 	hpb->num_inflight_pre_req = 0;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	if (!hpb->pre_req)
2337*4882a593Smuzhiyun 		goto release_mem;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	for (i = 0; i < qd; i++) {
2340*4882a593Smuzhiyun 		pre_req = hpb->pre_req + i;
2341*4882a593Smuzhiyun 		INIT_LIST_HEAD(&pre_req->list_req);
2342*4882a593Smuzhiyun 		pre_req->req = NULL;
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 		pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2345*4882a593Smuzhiyun 		if (!pre_req->bio)
2346*4882a593Smuzhiyun 			goto release_mem;
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2349*4882a593Smuzhiyun 		if (!pre_req->wb.m_page) {
2350*4882a593Smuzhiyun 			bio_put(pre_req->bio);
2351*4882a593Smuzhiyun 			goto release_mem;
2352*4882a593Smuzhiyun 		}
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2355*4882a593Smuzhiyun 	}
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 	return 0;
2358*4882a593Smuzhiyun release_mem:
2359*4882a593Smuzhiyun 	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2360*4882a593Smuzhiyun 		list_del_init(&pre_req->list_req);
2361*4882a593Smuzhiyun 		bio_put(pre_req->bio);
2362*4882a593Smuzhiyun 		__free_page(pre_req->wb.m_page);
2363*4882a593Smuzhiyun 	}
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	kfree(hpb->pre_req);
2366*4882a593Smuzhiyun 	return -ENOMEM;
2367*4882a593Smuzhiyun }
2368*4882a593Smuzhiyun 
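/*
 * Release everything ufshpb_pre_req_mempool_init() allocated. The page is
 * freed only when it was actually allocated.
 */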
2369*4882a593Smuzhiyun static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2370*4882a593Smuzhiyun {
2371*4882a593Smuzhiyun 	struct ufshpb_req *pre_req = NULL;
2372*4882a593Smuzhiyun 	int i;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 	for (i = 0; i < hpb->throttle_pre_req; i++) {
2375*4882a593Smuzhiyun 		pre_req = hpb->pre_req + i;
2376*4882a593Smuzhiyun 		bio_put(hpb->pre_req[i].bio);
2377*4882a593Smuzhiyun 		if (pre_req->wb.m_page)
2378*4882a593Smuzhiyun 			__free_page(hpb->pre_req[i].wb.m_page);
2379*4882a593Smuzhiyun 		list_del_init(&pre_req->list_req);
2380*4882a593Smuzhiyun 	}
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	kfree(hpb->pre_req);
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2386*4882a593Smuzhiyun {
2387*4882a593Smuzhiyun 	hpb->stats.hit_cnt = 0;
2388*4882a593Smuzhiyun 	hpb->stats.miss_cnt = 0;
2389*4882a593Smuzhiyun 	hpb->stats.rb_noti_cnt = 0;
2390*4882a593Smuzhiyun 	hpb->stats.rb_active_cnt = 0;
2391*4882a593Smuzhiyun 	hpb->stats.rb_inactive_cnt = 0;
2392*4882a593Smuzhiyun 	hpb->stats.map_req_cnt = 0;
2393*4882a593Smuzhiyun 	hpb->stats.umap_req_cnt = 0;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun static void ufshpb_param_init(struct ufshpb_lu *hpb)
2397*4882a593Smuzhiyun {
2398*4882a593Smuzhiyun 	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2399*4882a593Smuzhiyun 	if (hpb->is_hcm)
2400*4882a593Smuzhiyun 		ufshpb_hcm_param_init(hpb);
2401*4882a593Smuzhiyun }
2402*4882a593Smuzhiyun 
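/*
 * Per-LU bring-up: initialize the locks, LRU/active/inactive lists and
 * work items, create the map-request and map-page slab caches, build the
 * pre_req pool and the region table, then seed stats and parameters. For
 * HCM LUs the read-timeout poller is kicked off immediately. Failures
 * unwind in reverse order of construction.
 */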
2403*4882a593Smuzhiyun static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2404*4882a593Smuzhiyun {
2405*4882a593Smuzhiyun 	int ret;
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	spin_lock_init(&hpb->rgn_state_lock);
2408*4882a593Smuzhiyun 	spin_lock_init(&hpb->rsp_list_lock);
2409*4882a593Smuzhiyun 	spin_lock_init(&hpb->param_lock);
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2412*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hpb->lh_act_srgn);
2413*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2414*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hpb->list_hpb_lu);
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2417*4882a593Smuzhiyun 	if (hpb->is_hcm) {
2418*4882a593Smuzhiyun 		INIT_WORK(&hpb->ufshpb_normalization_work,
2419*4882a593Smuzhiyun 			  ufshpb_normalization_work_handler);
2420*4882a593Smuzhiyun 		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2421*4882a593Smuzhiyun 				  ufshpb_read_to_handler);
2422*4882a593Smuzhiyun 	}
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2425*4882a593Smuzhiyun 			  sizeof(struct ufshpb_req), 0, 0, NULL);
2426*4882a593Smuzhiyun 	if (!hpb->map_req_cache) {
2427*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2428*4882a593Smuzhiyun 			hpb->lun);
2429*4882a593Smuzhiyun 		return -ENOMEM;
2430*4882a593Smuzhiyun 	}
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2433*4882a593Smuzhiyun 			  sizeof(struct page *) * hpb->pages_per_srgn,
2434*4882a593Smuzhiyun 			  0, 0, NULL);
2435*4882a593Smuzhiyun 	if (!hpb->m_page_cache) {
2436*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2437*4882a593Smuzhiyun 			hpb->lun);
2438*4882a593Smuzhiyun 		ret = -ENOMEM;
2439*4882a593Smuzhiyun 		goto release_req_cache;
2440*4882a593Smuzhiyun 	}
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	ret = ufshpb_pre_req_mempool_init(hpb);
2443*4882a593Smuzhiyun 	if (ret) {
2444*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2445*4882a593Smuzhiyun 			hpb->lun);
2446*4882a593Smuzhiyun 		goto release_m_page_cache;
2447*4882a593Smuzhiyun 	}
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 	ret = ufshpb_alloc_region_tbl(hba, hpb);
2450*4882a593Smuzhiyun 	if (ret)
2451*4882a593Smuzhiyun 		goto release_pre_req_mempool;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	ufshpb_stat_init(hpb);
2454*4882a593Smuzhiyun 	ufshpb_param_init(hpb);
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun 	if (hpb->is_hcm) {
2457*4882a593Smuzhiyun 		unsigned int poll;
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 		poll = hpb->params.timeout_polling_interval_ms;
2460*4882a593Smuzhiyun 		schedule_delayed_work(&hpb->ufshpb_read_to_work,
2461*4882a593Smuzhiyun 				      msecs_to_jiffies(poll));
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	return 0;
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun release_pre_req_mempool:
2467*4882a593Smuzhiyun 	ufshpb_pre_req_mempool_destroy(hpb);
2468*4882a593Smuzhiyun release_m_page_cache:
2469*4882a593Smuzhiyun 	kmem_cache_destroy(hpb->m_page_cache);
2470*4882a593Smuzhiyun release_req_cache:
2471*4882a593Smuzhiyun 	kmem_cache_destroy(hpb->map_req_cache);
2472*4882a593Smuzhiyun 	return ret;
2473*4882a593Smuzhiyun }
2474*4882a593Smuzhiyun 
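/*
 * Allocate a ufshpb_lu for @sdev, derive its geometry parameters, run the
 * per-LU bring-up and publish it via sdev->hostdata. Returns NULL on any
 * failure.
 */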
2475*4882a593Smuzhiyun static struct ufshpb_lu *
2476*4882a593Smuzhiyun ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2477*4882a593Smuzhiyun 		    struct ufshpb_dev_info *hpb_dev_info,
2478*4882a593Smuzhiyun 		    struct ufshpb_lu_info *hpb_lu_info)
2479*4882a593Smuzhiyun {
2480*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2481*4882a593Smuzhiyun 	int ret;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2484*4882a593Smuzhiyun 	if (!hpb)
2485*4882a593Smuzhiyun 		return NULL;
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 	hpb->lun = sdev->lun;
2488*4882a593Smuzhiyun 	hpb->sdev_ufs_lu = sdev;
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 	ret = ufshpb_lu_hpb_init(hba, hpb);
2493*4882a593Smuzhiyun 	if (ret) {
2494*4882a593Smuzhiyun 		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2495*4882a593Smuzhiyun 		goto release_hpb;
2496*4882a593Smuzhiyun 	}
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 	sdev->hostdata = hpb;
2499*4882a593Smuzhiyun 	return hpb;
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun release_hpb:
2502*4882a593Smuzhiyun 	kfree(hpb);
2503*4882a593Smuzhiyun 	return NULL;
2504*4882a593Smuzhiyun }
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2507*4882a593Smuzhiyun {
2508*4882a593Smuzhiyun 	struct ufshpb_region *rgn, *next_rgn;
2509*4882a593Smuzhiyun 	struct ufshpb_subregion *srgn, *next_srgn;
2510*4882a593Smuzhiyun 	unsigned long flags;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	/*
2513*4882a593Smuzhiyun 	 * If a device reset occurred, the remaining HPB region information
2514*4882a593Smuzhiyun 	 * may be stale. Discard the HPB response lists left over after the
2515*4882a593Smuzhiyun 	 * reset to prevent unnecessary work on stale entries.
2516*4882a593Smuzhiyun 	 */
2517*4882a593Smuzhiyun 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2518*4882a593Smuzhiyun 	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2519*4882a593Smuzhiyun 				 list_inact_rgn)
2520*4882a593Smuzhiyun 		list_del_init(&rgn->list_inact_rgn);
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun 	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2523*4882a593Smuzhiyun 				 list_act_srgn)
2524*4882a593Smuzhiyun 		list_del_init(&srgn->list_act_srgn);
2525*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2526*4882a593Smuzhiyun }
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2529*4882a593Smuzhiyun {
2530*4882a593Smuzhiyun 	if (hpb->is_hcm) {
2531*4882a593Smuzhiyun 		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2532*4882a593Smuzhiyun 		cancel_work_sync(&hpb->ufshpb_normalization_work);
2533*4882a593Smuzhiyun 	}
2534*4882a593Smuzhiyun 	cancel_work_sync(&hpb->map_work);
2535*4882a593Smuzhiyun }
2536*4882a593Smuzhiyun 
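/*
 * Poll the fHpbReset flag until the device clears it, up to
 * HPB_RESET_REQ_RETRIES attempts roughly 1 ms apart. Returns the final
 * flag state, so "false" means the reset completed successfully.
 */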
2537*4882a593Smuzhiyun static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2538*4882a593Smuzhiyun {
2539*4882a593Smuzhiyun 	int err = 0;
2540*4882a593Smuzhiyun 	bool flag_res = true;
2541*4882a593Smuzhiyun 	int try;
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	/* wait for the device to complete HPB reset query */
2544*4882a593Smuzhiyun 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2545*4882a593Smuzhiyun 		dev_dbg(hba->dev,
2546*4882a593Smuzhiyun 			"%s start flag reset polling %d times\n",
2547*4882a593Smuzhiyun 			__func__, try);
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 		/* Poll fHpbReset flag to be cleared */
2550*4882a593Smuzhiyun 		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2551*4882a593Smuzhiyun 				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 		if (err) {
2554*4882a593Smuzhiyun 			dev_err(hba->dev,
2555*4882a593Smuzhiyun 				"%s reading fHpbReset flag failed with error %d\n",
2556*4882a593Smuzhiyun 				__func__, err);
2557*4882a593Smuzhiyun 			return flag_res;
2558*4882a593Smuzhiyun 		}
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 		if (!flag_res)
2561*4882a593Smuzhiyun 			goto out;
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 		usleep_range(1000, 1100);
2564*4882a593Smuzhiyun 	}
2565*4882a593Smuzhiyun 	if (flag_res) {
2566*4882a593Smuzhiyun 		dev_err(hba->dev,
2567*4882a593Smuzhiyun 			"%s fHpbReset was not cleared by the device\n",
2568*4882a593Smuzhiyun 			__func__);
2569*4882a593Smuzhiyun 	}
2570*4882a593Smuzhiyun out:
2571*4882a593Smuzhiyun 	return flag_res;
2572*4882a593Smuzhiyun }
2573*4882a593Smuzhiyun 
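/*
 * HPB state handling around host resets and power management:
 * ufshpb_reset_host() moves PRESENT LUs to HPB_RESET, cancels their work
 * and discards pending response lists; ufshpb_reset() returns them to
 * HPB_PRESENT once the host is back. ufshpb_suspend() and ufshpb_resume()
 * do the same transition via HPB_SUSPEND, with resume additionally
 * re-kicking the map work and, for HCM LUs, the read-timeout poller.
 */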
2574*4882a593Smuzhiyun void ufshpb_reset(struct ufs_hba *hba)
2575*4882a593Smuzhiyun {
2576*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2577*4882a593Smuzhiyun 	struct scsi_device *sdev;
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 	shost_for_each_device(sdev, hba->host) {
2580*4882a593Smuzhiyun 		hpb = ufshpb_get_hpb_data(sdev);
2581*4882a593Smuzhiyun 		if (!hpb)
2582*4882a593Smuzhiyun 			continue;
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 		if (ufshpb_get_state(hpb) != HPB_RESET)
2585*4882a593Smuzhiyun 			continue;
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 		ufshpb_set_state(hpb, HPB_PRESENT);
2588*4882a593Smuzhiyun 	}
2589*4882a593Smuzhiyun }
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun void ufshpb_reset_host(struct ufs_hba *hba)
2592*4882a593Smuzhiyun {
2593*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2594*4882a593Smuzhiyun 	struct scsi_device *sdev;
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun 	shost_for_each_device(sdev, hba->host) {
2597*4882a593Smuzhiyun 		hpb = ufshpb_get_hpb_data(sdev);
2598*4882a593Smuzhiyun 		if (!hpb)
2599*4882a593Smuzhiyun 			continue;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2602*4882a593Smuzhiyun 			continue;
2603*4882a593Smuzhiyun 		ufshpb_set_state(hpb, HPB_RESET);
2604*4882a593Smuzhiyun 		ufshpb_cancel_jobs(hpb);
2605*4882a593Smuzhiyun 		ufshpb_discard_rsp_lists(hpb);
2606*4882a593Smuzhiyun 	}
2607*4882a593Smuzhiyun }
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun void ufshpb_suspend(struct ufs_hba *hba)
2610*4882a593Smuzhiyun {
2611*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2612*4882a593Smuzhiyun 	struct scsi_device *sdev;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	shost_for_each_device(sdev, hba->host) {
2615*4882a593Smuzhiyun 		hpb = ufshpb_get_hpb_data(sdev);
2616*4882a593Smuzhiyun 		if (!hpb)
2617*4882a593Smuzhiyun 			continue;
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2620*4882a593Smuzhiyun 			continue;
2621*4882a593Smuzhiyun 		ufshpb_set_state(hpb, HPB_SUSPEND);
2622*4882a593Smuzhiyun 		ufshpb_cancel_jobs(hpb);
2623*4882a593Smuzhiyun 	}
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun void ufshpb_resume(struct ufs_hba *hba)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2629*4882a593Smuzhiyun 	struct scsi_device *sdev;
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	shost_for_each_device(sdev, hba->host) {
2632*4882a593Smuzhiyun 		hpb = ufshpb_get_hpb_data(sdev);
2633*4882a593Smuzhiyun 		if (!hpb)
2634*4882a593Smuzhiyun 			continue;
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2637*4882a593Smuzhiyun 		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
2638*4882a593Smuzhiyun 			continue;
2639*4882a593Smuzhiyun 		ufshpb_set_state(hpb, HPB_PRESENT);
2640*4882a593Smuzhiyun 		ufshpb_kick_map_work(hpb);
2641*4882a593Smuzhiyun 		if (hpb->is_hcm) {
2642*4882a593Smuzhiyun 			unsigned int poll =
2643*4882a593Smuzhiyun 				hpb->params.timeout_polling_interval_ms;
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 			schedule_delayed_work(&hpb->ufshpb_read_to_work,
2646*4882a593Smuzhiyun 				msecs_to_jiffies(poll));
2647*4882a593Smuzhiyun 		}
2648*4882a593Smuzhiyun 	}
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun 
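/*
 * Read the unit descriptor for @lun and extract its HPB geometry: logical
 * block count, pinned-region window and maximum number of active regions.
 * Returns -ENODEV when the LU has HPB disabled or reports a zero
 * active-region count.
 */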
2651*4882a593Smuzhiyun static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2652*4882a593Smuzhiyun 			      struct ufshpb_lu_info *hpb_lu_info)
2653*4882a593Smuzhiyun {
2654*4882a593Smuzhiyun 	u16 max_active_rgns;
2655*4882a593Smuzhiyun 	u8 lu_enable;
2656*4882a593Smuzhiyun 	int size;
2657*4882a593Smuzhiyun 	int ret;
2658*4882a593Smuzhiyun 	char desc_buf[QUERY_DESC_MAX_SIZE];
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2661*4882a593Smuzhiyun 
2662*4882a593Smuzhiyun 	pm_runtime_get_sync(hba->dev);
2663*4882a593Smuzhiyun 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2664*4882a593Smuzhiyun 					    QUERY_DESC_IDN_UNIT, lun, 0,
2665*4882a593Smuzhiyun 					    desc_buf, &size);
2666*4882a593Smuzhiyun 	pm_runtime_put_sync(hba->dev);
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	if (ret) {
2669*4882a593Smuzhiyun 		dev_err(hba->dev,
2670*4882a593Smuzhiyun 			"%s: idn: %d lun: %d  query request failed",
2671*4882a593Smuzhiyun 			__func__, QUERY_DESC_IDN_UNIT, lun);
2672*4882a593Smuzhiyun 		return ret;
2673*4882a593Smuzhiyun 	}
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2676*4882a593Smuzhiyun 	if (lu_enable != LU_ENABLED_HPB_FUNC)
2677*4882a593Smuzhiyun 		return -ENODEV;
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun 	max_active_rgns = get_unaligned_be16(
2680*4882a593Smuzhiyun 			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2681*4882a593Smuzhiyun 	if (!max_active_rgns) {
2682*4882a593Smuzhiyun 		dev_err(hba->dev,
2683*4882a593Smuzhiyun 			"lun %d wrong number of max active regions\n", lun);
2684*4882a593Smuzhiyun 		return -ENODEV;
2685*4882a593Smuzhiyun 	}
2686*4882a593Smuzhiyun 
2687*4882a593Smuzhiyun 	hpb_lu_info->num_blocks = get_unaligned_be64(
2688*4882a593Smuzhiyun 			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2689*4882a593Smuzhiyun 	hpb_lu_info->pinned_start = get_unaligned_be16(
2690*4882a593Smuzhiyun 			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2691*4882a593Smuzhiyun 	hpb_lu_info->num_pinned = get_unaligned_be16(
2692*4882a593Smuzhiyun 			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2693*4882a593Smuzhiyun 	hpb_lu_info->max_active_rgns = max_active_rgns;
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 	return 0;
2696*4882a593Smuzhiyun }
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2699*4882a593Smuzhiyun {
2700*4882a593Smuzhiyun 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	if (!hpb)
2703*4882a593Smuzhiyun 		return;
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	ufshpb_set_state(hpb, HPB_FAILED);
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	sdev = hpb->sdev_ufs_lu;
2708*4882a593Smuzhiyun 	sdev->hostdata = NULL;
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 	ufshpb_cancel_jobs(hpb);
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 	ufshpb_pre_req_mempool_destroy(hpb);
2713*4882a593Smuzhiyun 	ufshpb_destroy_region_tbl(hpb);
2714*4882a593Smuzhiyun 
2715*4882a593Smuzhiyun 	kmem_cache_destroy(hpb->map_req_cache);
2716*4882a593Smuzhiyun 	kmem_cache_destroy(hpb->m_page_cache);
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun 	list_del_init(&hpb->list_hpb_lu);
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	kfree(hpb);
2721*4882a593Smuzhiyun }
2722*4882a593Smuzhiyun 
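/*
 * Called once the last LU has finished probing. If no LU ended up using
 * HPB, the global resources are torn down again. Otherwise the global
 * mempools are shrunk to what the active subregions actually need, the
 * device is given time to finish its HPB reset, and each LU is either
 * marked present (kicking map work for pinned regions, and issuing an
 * unmap-all for device-control-mode LUs) or destroyed when the reset
 * never completed.
 */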
2723*4882a593Smuzhiyun static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2724*4882a593Smuzhiyun {
2725*4882a593Smuzhiyun 	int pool_size;
2726*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2727*4882a593Smuzhiyun 	struct scsi_device *sdev;
2728*4882a593Smuzhiyun 	bool init_success;
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 	if (tot_active_srgn_pages == 0) {
2731*4882a593Smuzhiyun 		ufshpb_remove(hba);
2732*4882a593Smuzhiyun 		return;
2733*4882a593Smuzhiyun 	}
2734*4882a593Smuzhiyun 
2735*4882a593Smuzhiyun 	init_success = !ufshpb_check_hpb_reset_query(hba);
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2738*4882a593Smuzhiyun 	if (pool_size > tot_active_srgn_pages) {
2739*4882a593Smuzhiyun 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2740*4882a593Smuzhiyun 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2741*4882a593Smuzhiyun 	}
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun 	shost_for_each_device(sdev, hba->host) {
2744*4882a593Smuzhiyun 		hpb = ufshpb_get_hpb_data(sdev);
2745*4882a593Smuzhiyun 		if (!hpb)
2746*4882a593Smuzhiyun 			continue;
2747*4882a593Smuzhiyun 
2748*4882a593Smuzhiyun 		if (init_success) {
2749*4882a593Smuzhiyun 			ufshpb_set_state(hpb, HPB_PRESENT);
2750*4882a593Smuzhiyun 			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2751*4882a593Smuzhiyun 				queue_work(ufshpb_wq, &hpb->map_work);
2752*4882a593Smuzhiyun 			if (!hpb->is_hcm)
2753*4882a593Smuzhiyun 				ufshpb_issue_umap_all_req(hpb);
2754*4882a593Smuzhiyun 		} else {
2755*4882a593Smuzhiyun 			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2756*4882a593Smuzhiyun 			ufshpb_destroy_lu(hba, sdev);
2757*4882a593Smuzhiyun 		}
2758*4882a593Smuzhiyun 	}
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 	if (!init_success)
2761*4882a593Smuzhiyun 		ufshpb_remove(hba);
2762*4882a593Smuzhiyun }
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2765*4882a593Smuzhiyun {
2766*4882a593Smuzhiyun 	struct ufshpb_lu *hpb;
2767*4882a593Smuzhiyun 	int ret;
2768*4882a593Smuzhiyun 	struct ufshpb_lu_info hpb_lu_info = { 0 };
2769*4882a593Smuzhiyun 	int lun = sdev->lun;
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 	if (lun >= hba->dev_info.max_lu_supported)
2772*4882a593Smuzhiyun 		goto out;
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun 	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2775*4882a593Smuzhiyun 	if (ret)
2776*4882a593Smuzhiyun 		goto out;
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	hpb = ufshpb_alloc_hpb_lu(hba, sdev, ufs_hba_to_hpb(hba), &hpb_lu_info);
2779*4882a593Smuzhiyun 	if (!hpb)
2780*4882a593Smuzhiyun 		goto out;
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2783*4882a593Smuzhiyun 			hpb->srgns_per_rgn * hpb->pages_per_srgn;
2784*4882a593Smuzhiyun 
2785*4882a593Smuzhiyun out:
2786*4882a593Smuzhiyun 	/* Prepare HPB once every LU has finished initialization */
2787*4882a593Smuzhiyun 	if (atomic_dec_and_test(&ufs_hba_to_hpb(hba)->slave_conf_cnt))
2788*4882a593Smuzhiyun 		ufshpb_hpb_lu_prepared(hba);
2789*4882a593Smuzhiyun }
2790*4882a593Smuzhiyun 
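/*
 * Set up the driver-global resources: the map-context slab cache, the
 * mctx and page mempools (sized from ufshpb_host_map_kbytes) and the
 * unbound, memory-reclaim-safe workqueue used for map work. Failures
 * unwind in reverse order of construction.
 */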
2791*4882a593Smuzhiyun static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2792*4882a593Smuzhiyun {
2793*4882a593Smuzhiyun 	int ret;
2794*4882a593Smuzhiyun 	unsigned int pool_size;
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun 	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2797*4882a593Smuzhiyun 					sizeof(struct ufshpb_map_ctx),
2798*4882a593Smuzhiyun 					0, 0, NULL);
2799*4882a593Smuzhiyun 	if (!ufshpb_mctx_cache) {
2800*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2801*4882a593Smuzhiyun 		return -ENOMEM;
2802*4882a593Smuzhiyun 	}
2803*4882a593Smuzhiyun 
2804*4882a593Smuzhiyun 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2805*4882a593Smuzhiyun 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2806*4882a593Smuzhiyun 	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2809*4882a593Smuzhiyun 						    ufshpb_mctx_cache);
2810*4882a593Smuzhiyun 	if (!ufshpb_mctx_pool) {
2811*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2812*4882a593Smuzhiyun 		ret = -ENOMEM;
2813*4882a593Smuzhiyun 		goto release_mctx_cache;
2814*4882a593Smuzhiyun 	}
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2817*4882a593Smuzhiyun 	if (!ufshpb_page_pool) {
2818*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2819*4882a593Smuzhiyun 		ret = -ENOMEM;
2820*4882a593Smuzhiyun 		goto release_mctx_pool;
2821*4882a593Smuzhiyun 	}
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun 	ufshpb_wq = alloc_workqueue("ufshpb-wq",
2824*4882a593Smuzhiyun 					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2825*4882a593Smuzhiyun 	if (!ufshpb_wq) {
2826*4882a593Smuzhiyun 		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2827*4882a593Smuzhiyun 		ret = -ENOMEM;
2828*4882a593Smuzhiyun 		goto release_page_pool;
2829*4882a593Smuzhiyun 	}
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun 	return 0;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun release_page_pool:
2834*4882a593Smuzhiyun 	mempool_destroy(ufshpb_page_pool);
2835*4882a593Smuzhiyun release_mctx_pool:
2836*4882a593Smuzhiyun 	mempool_destroy(ufshpb_mctx_pool);
2837*4882a593Smuzhiyun release_mctx_cache:
2838*4882a593Smuzhiyun 	kmem_cache_destroy(ufshpb_mctx_cache);
2839*4882a593Smuzhiyun 	return ret;
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun 
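/*
 * Parse the HPB fields of the geometry descriptor. HPB is disabled when
 * the device reports no HPB LUs, a zero region or subregion size, or zero
 * maximum active regions.
 */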
2842*4882a593Smuzhiyun void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2843*4882a593Smuzhiyun {
2844*4882a593Smuzhiyun 	struct ufshpb_dev_info *hpb_info = ufs_hba_to_hpb(hba);
2845*4882a593Smuzhiyun 	int max_active_rgns = 0;
2846*4882a593Smuzhiyun 	int hpb_num_lu;
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2849*4882a593Smuzhiyun 	if (hpb_num_lu == 0) {
2850*4882a593Smuzhiyun 		dev_err(hba->dev, "No HPB LU supported\n");
2851*4882a593Smuzhiyun 		hpb_info->hpb_disabled = true;
2852*4882a593Smuzhiyun 		return;
2853*4882a593Smuzhiyun 	}
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2856*4882a593Smuzhiyun 	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2857*4882a593Smuzhiyun 	max_active_rgns = get_unaligned_be16(geo_buf +
2858*4882a593Smuzhiyun 			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun 	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2861*4882a593Smuzhiyun 	    max_active_rgns == 0) {
2862*4882a593Smuzhiyun 		dev_err(hba->dev, "No HPB supported device\n");
2863*4882a593Smuzhiyun 		hpb_info->hpb_disabled = true;
2864*4882a593Smuzhiyun 		return;
2865*4882a593Smuzhiyun 	}
2866*4882a593Smuzhiyun }
2867*4882a593Smuzhiyun 
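/*
 * Parse the HPB fields of the device descriptor: control mode and HPB
 * version (only the supported and the 1.0 "legacy" versions are accepted),
 * the maximum single-command chunk size (falling back to
 * HPB_MULTI_CHUNK_LOW if the attribute query fails), and the user LU count
 * used to detect the end of per-LU probing.
 */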
2868*4882a593Smuzhiyun void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2869*4882a593Smuzhiyun {
2870*4882a593Smuzhiyun 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
2871*4882a593Smuzhiyun 	int version, ret;
2872*4882a593Smuzhiyun 	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
2877*4882a593Smuzhiyun 	if ((version != HPB_SUPPORT_VERSION) &&
2878*4882a593Smuzhiyun 	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
2879*4882a593Smuzhiyun 		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2880*4882a593Smuzhiyun 			__func__, version);
2881*4882a593Smuzhiyun 		hpb_dev_info->hpb_disabled = true;
2882*4882a593Smuzhiyun 		return;
2883*4882a593Smuzhiyun 	}
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 	if (version == HPB_SUPPORT_LEGACY_VERSION)
2886*4882a593Smuzhiyun 		hpb_dev_info->is_legacy = true;
2887*4882a593Smuzhiyun 
2888*4882a593Smuzhiyun 	pm_runtime_get_sync(hba->dev);
2889*4882a593Smuzhiyun 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2890*4882a593Smuzhiyun 		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
2891*4882a593Smuzhiyun 	pm_runtime_put_sync(hba->dev);
2892*4882a593Smuzhiyun 
2893*4882a593Smuzhiyun 	if (ret)
2894*4882a593Smuzhiyun 		dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
2895*4882a593Smuzhiyun 			__func__);
2896*4882a593Smuzhiyun 	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	/*
2899*4882a593Smuzhiyun 	 * Get the number of user logical units so we can tell when every
2900*4882a593Smuzhiyun 	 * scsi_device has finished initialization.
2901*4882a593Smuzhiyun 	 */
2902*4882a593Smuzhiyun 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2903*4882a593Smuzhiyun }
2904*4882a593Smuzhiyun 
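/*
 * Device-level HPB bring-up: allocate the global pools and workqueue, arm
 * the per-LU probe counter, and issue the fHpbReset SET_FLAG query
 * (retried up to HPB_RESET_REQ_RETRIES times). Completion of the reset is
 * polled later, in ufshpb_hpb_lu_prepared().
 */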
2905*4882a593Smuzhiyun void ufshpb_init(struct ufs_hba *hba)
2906*4882a593Smuzhiyun {
2907*4882a593Smuzhiyun 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
2908*4882a593Smuzhiyun 	int try;
2909*4882a593Smuzhiyun 	int ret;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2912*4882a593Smuzhiyun 		return;
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	if (ufshpb_init_mem_wq(hba)) {
2915*4882a593Smuzhiyun 		hpb_dev_info->hpb_disabled = true;
2916*4882a593Smuzhiyun 		return;
2917*4882a593Smuzhiyun 	}
2918*4882a593Smuzhiyun 
2919*4882a593Smuzhiyun 	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2920*4882a593Smuzhiyun 	tot_active_srgn_pages = 0;
2921*4882a593Smuzhiyun 	/* issue HPB reset query */
2922*4882a593Smuzhiyun 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2923*4882a593Smuzhiyun 		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2924*4882a593Smuzhiyun 					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2925*4882a593Smuzhiyun 		if (!ret)
2926*4882a593Smuzhiyun 			break;
2927*4882a593Smuzhiyun 	}
2928*4882a593Smuzhiyun }
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun void ufshpb_remove(struct ufs_hba *hba)
2931*4882a593Smuzhiyun {
2932*4882a593Smuzhiyun 	mempool_destroy(ufshpb_page_pool);
2933*4882a593Smuzhiyun 	mempool_destroy(ufshpb_mctx_pool);
2934*4882a593Smuzhiyun 	kmem_cache_destroy(ufshpb_mctx_cache);
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	destroy_workqueue(ufshpb_wq);
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun module_param(ufshpb_host_map_kbytes, uint, 0644);
2940*4882a593Smuzhiyun MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2941*4882a593Smuzhiyun 	"ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
2942