// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>
#include <asm/bitsperlong.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

/**
 * marshal_virt_to_resize() - translate uvirtual to resize structure
 * @virt:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = virt->hdr;
	resize->context_id = virt->context_id;
	resize->rsrc_handle = virt->rsrc_handle;
	resize->req_size = virt->lun_size;
	resize->last_lba = virt->last_lba;
}

/**
 * marshal_clone_to_rele() - translate clone to release structure
 * @clone:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
				  struct dk_cxlflash_release *release)
{
	release->hdr = clone->hdr;
	release->context_id = clone->context_id_dst;
}

/**
 * ba_init() - initializes a block allocator
 * @ba_lun:	Block allocator to initialize.
 *
 * Return: 0 on success, -errno on failure
 */
static int ba_init(struct ba_lun *ba_lun)
{
	struct ba_lun_info *bali = NULL;
	int lun_size_au = 0, i = 0;
	int last_word_underflow = 0;
	u64 *lam;

	pr_debug("%s: Initializing LUN: lun_id=%016llx "
		 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
		__func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);

	/* Calculate bit map size */
	lun_size_au = ba_lun->lsize / ba_lun->au_size;
	if (lun_size_au == 0) {
		pr_debug("%s: Requested LUN size of 0!\n", __func__);
		return -EINVAL;
	}

	/* Allocate lun information container */
	bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
	if (unlikely(!bali)) {
		pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		return -ENOMEM;
	}

	bali->total_aus = lun_size_au;
	bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;

	if (lun_size_au % BITS_PER_LONG)
		bali->lun_bmap_size++;

	/* Allocate bitmap space */
	bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
				      GFP_KERNEL);
	if (unlikely(!bali->lun_alloc_map)) {
		pr_err("%s: Failed to allocate lun allocation map: "
		       "lun_id=%016llx\n", __func__, ba_lun->lun_id);
		kfree(bali);
		return -ENOMEM;
	}

	/* Initialize the bit map size and set all bits to '1' */
	bali->free_aun_cnt = lun_size_au;

	for (i = 0; i < bali->lun_bmap_size; i++)
		bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;

	/* If the last word is not fully utilized, mark extra bits as allocated */
	last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
	last_word_underflow -= bali->free_aun_cnt;
	if (last_word_underflow > 0) {
		lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
		for (i = (HIBIT - last_word_underflow + 1);
		     i < BITS_PER_LONG;
		     i++)
			clear_bit(i, (ulong *)lam);
	}
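	/*
	 * E.g., on a 64-bit system with 100 AUs: lun_bmap_size is 2,
	 * last_word_underflow is 128 - 100 = 28, so bits 36..63 of the
	 * final word are cleared (marked as allocated).
	 */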

	/* Initialize high elevator index, low/curr already at 0 from kzalloc */
	bali->free_high_idx = bali->lun_bmap_size;

	/* Allocate clone map */
	bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
				      GFP_KERNEL);
	if (unlikely(!bali->aun_clone_map)) {
		pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		kfree(bali->lun_alloc_map);
		kfree(bali);
		return -ENOMEM;
	}

	/* Pass the allocated LUN info as a handle to the user */
	ba_lun->ba_lun_handle = bali;

	pr_debug("%s: Successfully initialized the LUN: "
		 "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
		__func__, ba_lun->lun_id, bali->lun_bmap_size,
		bali->free_aun_cnt);
	return 0;
}

/**
 * find_free_range() - locates a free bit within the block allocator
 * @low:	First word in block allocator to start search.
 * @high:	Last word in block allocator to search.
 * @bali:	LUN information structure owning the block allocator to search.
 * @bit_word:	Passes back the word in the block allocator owning the free bit.
 *
 * Return: The bit position within the passed back word, -1 on failure
 */
static int find_free_range(u32 low,
			   u32 high,
			   struct ba_lun_info *bali, int *bit_word)
{
	int i;
	u64 bit_pos = -1;
	ulong *lam, num_bits;

	for (i = low; i < high; i++)
		if (bali->lun_alloc_map[i] != 0) {
			lam = (ulong *)&bali->lun_alloc_map[i];
			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
			bit_pos = find_first_bit(lam, num_bits);

			pr_devel("%s: Found free bit %llu in LUN "
				 "map entry %016llx at bitmap index = %d\n",
				 __func__, bit_pos, bali->lun_alloc_map[i], i);

			*bit_word = i;
			bali->free_aun_cnt--;
			clear_bit(bit_pos, lam);
			break;
		}

	return bit_pos;
}

/**
 * ba_alloc() - allocates a block from the block allocator
 * @ba_lun:	Block allocator from which to allocate a block.
 *
 * Return: The allocated block, -1 on failure
 */
static u64 ba_alloc(struct ba_lun *ba_lun)
{
	u64 bit_pos = -1;
	int bit_word = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	pr_debug("%s: Received block allocation request: "
		 "lun_id=%016llx free_aun_cnt=%llx\n",
		 __func__, ba_lun->lun_id, bali->free_aun_cnt);

	if (bali->free_aun_cnt == 0) {
		pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
			 __func__, ba_lun->lun_id);
		return -1ULL;
	}

	/* Search to find a free entry, curr->high then low->curr */
	bit_pos = find_free_range(bali->free_curr_idx,
				  bali->free_high_idx, bali, &bit_word);
	if (bit_pos == -1) {
		bit_pos = find_free_range(bali->free_low_idx,
					  bali->free_curr_idx,
					  bali, &bit_word);
		if (bit_pos == -1) {
			pr_debug("%s: Could not find an allocation unit on LUN:"
				 " lun_id=%016llx\n", __func__, ba_lun->lun_id);
			return -1ULL;
		}
	}

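	/*
	 * Elevator-style cursor: remember where to resume the next search.
	 * Since find_free_range() takes the lowest set bit, taking HIBIT
	 * implies the word is now exhausted and the cursor moves on to the
	 * following word.
	 */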
	/* Update the free_curr_idx */
	if (bit_pos == HIBIT)
		bali->free_curr_idx = bit_word + 1;
	else
		bali->free_curr_idx = bit_word;

	pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__,
		 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
		 bali->free_aun_cnt);

	return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
}

/**
 * validate_alloc() - validates the specified block has been allocated
 * @bali:	LUN info owning the block allocator.
 * @aun:		Block to validate.
 *
 * Return: 0 on success, -1 on failure
 */
static int validate_alloc(struct ba_lun_info *bali, u64 aun)
{
	int idx = 0, bit_pos = 0;

	idx = aun / BITS_PER_LONG;
	bit_pos = aun % BITS_PER_LONG;

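	/* A set bit in lun_alloc_map means the AU is still free */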
	if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
		return -1;

	return 0;
}

/**
 * ba_free() - frees a block from the block allocator
 * @ba_lun:	Block allocator from which to allocate a block.
 * @to_free:	Block to free.
 *
 * Return: 0 on success, -1 on failure
 */
static int ba_free(struct ba_lun *ba_lun, u64 to_free)
{
	int idx = 0, bit_pos = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	if (validate_alloc(bali, to_free)) {
		pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
			 __func__, to_free, ba_lun->lun_id);
		return -1;
	}

	pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
		 bali->free_aun_cnt);

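	/* Cloned AUs are reference counted; drop a reference instead of freeing */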
	if (bali->aun_clone_map[to_free] > 0) {
		pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
			 __func__, to_free, ba_lun->lun_id,
			 bali->aun_clone_map[to_free]);
		bali->aun_clone_map[to_free]--;
		return 0;
	}

	idx = to_free / BITS_PER_LONG;
	bit_pos = to_free % BITS_PER_LONG;

	set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
	bali->free_aun_cnt++;

	if (idx < bali->free_low_idx)
		bali->free_low_idx = idx;
	else if (idx > bali->free_high_idx)
		bali->free_high_idx = idx;

	pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
		 "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
		 ba_lun->lun_id, bali->free_aun_cnt);

	return 0;
}

/**
 * ba_clone() - Clone a chunk of the block allocation table
 * @ba_lun:	Block allocator from which to allocate a block.
 * @to_clone:	Block to clone.
 *
 * Return: 0 on success, -1 on failure
 */
static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
{
	struct ba_lun_info *bali = ba_lun->ba_lun_handle;

	if (validate_alloc(bali, to_clone)) {
		pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
			 __func__, to_clone, ba_lun->lun_id);
		return -1;
	}

	pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
		 __func__, to_clone, ba_lun->lun_id);

	if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
		pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
			 __func__, to_clone, ba_lun->lun_id);
		return -1;
	}

	bali->aun_clone_map[to_clone]++;

	return 0;
}

/**
 * ba_space() - returns the amount of free space left in the block allocator
 * @ba_lun:	Block allocator.
 *
 * Return: Amount of free space in block allocator
 */
static u64 ba_space(struct ba_lun *ba_lun)
{
	struct ba_lun_info *bali = ba_lun->ba_lun_handle;

	return bali->free_aun_cnt;
}

/**
 * cxlflash_ba_terminate() - frees resources associated with the block allocator
 * @ba_lun:	Block allocator.
 *
 * Safe to call in a partially allocated state.
 */
void cxlflash_ba_terminate(struct ba_lun *ba_lun)
{
	struct ba_lun_info *bali = ba_lun->ba_lun_handle;

	if (bali) {
		kfree(bali->aun_clone_map);
		kfree(bali->lun_alloc_map);
		kfree(bali);
		ba_lun->ba_lun_handle = NULL;
	}
}

/**
 * init_vlun() - initializes a LUN for virtual use
 * @lli:	LUN information structure that owns the block allocator.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_vlun(struct llun_info *lli)
{
	int rc = 0;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;

	memset(blka, 0, sizeof(*blka));
	mutex_init(&blka->mutex);

	/* LUN IDs are unique per port, save the index instead */
	blka->ba_lun.lun_id = lli->lun_index;
	blka->ba_lun.lsize = gli->max_lba + 1;
	blka->ba_lun.lba_size = gli->blk_len;

	blka->ba_lun.au_size = MC_CHUNK_SIZE;
	blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;

	rc = ba_init(&blka->ba_lun);
	if (unlikely(rc))
		pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);

	pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
	return rc;
}

/**
 * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN
 * @sdev:	SCSI device associated with LUN.
 * @lba:	Logical block address to start write same.
 * @nblks:	Number of logical blocks to write same.
 *
 * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will attempt to recover. As part of
 * the recovery, the handler drains all currently running ioctls, waiting until
 * they have completed before proceeding with a reset. As this routine is used
 * on the ioctl path, this can create a condition where the EEH handler becomes
 * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
 * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
 * semaphore. This will allow the EEH handler to proceed with a recovery while
 * this thread is still running. Once the scsi_execute() returns, reacquire the
 * ioctl read semaphore and check the adapter state in case it changed while
 * inside of scsi_execute(). The state check will wait if the adapter is still
 * being recovered or return a failure if the recovery failed. In the event that
 * the adapter reset failed, simply return the failure as the ioctl would be
 * unable to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int write_same16(struct scsi_device *sdev,
			u64 lba,
			u32 nblks)
{
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	u64 offset = lba;
	int left = nblks;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	const u32 s = ilog2(sdev->sector_size) - 9;
	const u32 to = sdev->request_queue->rq_timeout;
	const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
						       REQ_OP_WRITE_SAME) >> s;
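	/*
	 * ws_limit is the largest number of logical blocks a single
	 * WRITE_SAME(16) may cover: the queue's max sectors (512-byte units)
	 * scaled down to device-block units via shift 's'.
	 */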

	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	while (left > 0) {

		scsi_cmd[0] = WRITE_SAME_16;
		scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
		put_unaligned_be64(offset, &scsi_cmd[2]);
		put_unaligned_be32(ws_limit < left ? ws_limit : left,
				   &scsi_cmd[10]);

		/* Drop the ioctl read semaphore across lengthy call */
		up_read(&cfg->ioctl_rwsem);
		result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
				      CMD_BUFSIZE, NULL, NULL, to,
				      CMD_RETRIES, 0, 0, NULL);
		down_read(&cfg->ioctl_rwsem);
		rc = check_state(cfg);
		if (rc) {
			dev_err(dev, "%s: Failed state result=%08x\n",
				__func__, result);
			rc = -ENODEV;
			goto out;
		}

		if (result) {
			dev_err_ratelimited(dev, "%s: command failed for "
					    "offset=%lld result=%08x\n",
					    __func__, offset, result);
			rc = -EIO;
			goto out;
		}
		left -= ws_limit;
		offset += ws_limit;
	}

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * grow_lxt() - expands the translation table associated with the specified RHTE
 * @afu:	AFU associated with the host.
 * @sdev:	SCSI device associated with LUN.
 * @ctxid:	Context ID of context owning the RHTE.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Resource handle entry (RHTE).
 * @new_size:	Number of translation entries associated with RHTE.
 *
 * By design, this routine employs a 'best attempt' allocation and will
 * truncate the requested size down if there is not sufficient space in
 * the block allocator to satisfy the request but there does exist some
 * amount of space. The user is made aware of this by returning the size
 * allocated.
 *
 * Return: 0 on success, -errno on failure
 */
static int grow_lxt(struct afu *afu,
		    struct scsi_device *sdev,
		    ctx_hndl_t ctxid,
		    res_hndl_t rhndl,
		    struct sisl_rht_entry *rhte,
		    u64 *new_size)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	u32 av_size;
	u32 ngrps, ngrps_old;
	u64 aun;		/* chunk# allocated by block allocator */
	u64 delta = *new_size - rhte->lxt_cnt;
	u64 my_new_size;
	int i, rc = 0;

	/*
	 * Check what is available in the block allocator before re-allocating
	 * LXT array. This is done up front under the mutex which must not be
	 * released until after allocation is complete.
	 */
	mutex_lock(&blka->mutex);
	av_size = ba_space(&blka->ba_lun);
	if (unlikely(av_size <= 0)) {
		dev_dbg(dev, "%s: ba_space error av_size=%d\n",
			__func__, av_size);
		mutex_unlock(&blka->mutex);
		rc = -ENOSPC;
		goto out;
	}

	if (av_size < delta)
		delta = av_size;

	lxt_old = rhte->lxt_start;
	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);

	if (ngrps != ngrps_old) {
		/* reallocate to fit new size */
		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
			      GFP_KERNEL);
		if (unlikely(!lxt)) {
			mutex_unlock(&blka->mutex);
			rc = -ENOMEM;
			goto out;
		}

		/* copy over all old entries */
		memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
	} else
		lxt = lxt_old;

	/* nothing can fail from now on */
	my_new_size = rhte->lxt_cnt + delta;

	/* add new entries to the end */
	for (i = rhte->lxt_cnt; i < my_new_size; i++) {
		/*
		 * Due to the earlier check of available space, ba_alloc
		 * cannot fail here. If it did due to internal error,
		 * leave a rlba_base of -1u which will likely be an
		 * invalid LUN (too large).
		 */
		aun = ba_alloc(&blka->ba_lun);
		if ((aun == -1ULL) || (aun >= blka->nchunk))
			dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
				"max=%llu\n", __func__, aun, blka->nchunk - 1);

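		/*
		 * Each LXT entry packs the physical chunk number into the
		 * upper bits of rlba_base, with the LUN table index, the
		 * RHT read/write permissions and the port-select mask in
		 * the bits below it.
		 */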
		/* select both ports, use r/w perms from RHT */
		lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
				    (lli->lun_index << LXT_LUNIDX_SHIFT) |
				    (RHT_PERM_RW << LXT_PERM_SHIFT |
				     lli->port_sel));
	}

	mutex_unlock(&blka->mutex);

	/*
	 * The following sequence is prescribed in the SISlite spec
	 * for syncing up with the AFU when adding LXT entries.
	 */
	dma_wmb(); /* Make LXT updates visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	rhte->lxt_cnt = my_new_size;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
	if (unlikely(rc))
		rc = -EAGAIN;

	/* free old lxt if reallocated */
	if (lxt != lxt_old)
		kfree(lxt_old);
	*new_size = my_new_size;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * shrink_lxt() - reduces translation table associated with the specified RHTE
 * @afu:	AFU associated with the host.
 * @sdev:	SCSI device associated with LUN.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Resource handle entry (RHTE).
 * @ctxi:	Context owning resources.
 * @new_size:	Number of translation entries associated with RHTE.
 *
 * Return: 0 on success, -errno on failure
 */
static int shrink_lxt(struct afu *afu,
		      struct scsi_device *sdev,
		      res_hndl_t rhndl,
		      struct sisl_rht_entry *rhte,
		      struct ctx_info *ctxi,
		      u64 *new_size)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt, *lxt_old;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
	bool needs_ws = ctxi->rht_needs_ws[rhndl];
	bool needs_sync = !ctxi->err_recovery_active;
	u32 ngrps, ngrps_old;
	u64 aun;		/* chunk# allocated by block allocator */
	u64 delta = rhte->lxt_cnt - *new_size;
	u64 my_new_size;
	int i, rc = 0;

	lxt_old = rhte->lxt_start;
	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);

	if (ngrps != ngrps_old) {
		/* Reallocate to fit new size unless new size is 0 */
		if (ngrps) {
			lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
				      GFP_KERNEL);
			if (unlikely(!lxt)) {
				rc = -ENOMEM;
				goto out;
			}

			/* Copy over old entries that will remain */
			memcpy(lxt, lxt_old,
			       (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
		} else
			lxt = NULL;
	} else
		lxt = lxt_old;

	/* Nothing can fail from now on */
	my_new_size = rhte->lxt_cnt - delta;

	/*
	 * The following sequence is prescribed in the SISlite spec
	 * for syncing up with the AFU when removing LXT entries.
	 */
	rhte->lxt_cnt = my_new_size;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	if (needs_sync) {
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc))
			rc = -EAGAIN;
	}

	if (needs_ws) {
		/*
		 * Mark the context as unavailable, so that we can release
		 * the mutex safely.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
	}

	/* Free LBAs allocated to freed chunks */
	mutex_lock(&blka->mutex);
	for (i = delta - 1; i >= 0; i--) {
		aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
		if (needs_ws)
			write_same16(sdev, aun, MC_CHUNK_SIZE);
		ba_free(&blka->ba_lun, aun);
	}
	mutex_unlock(&blka->mutex);

	if (needs_ws) {
		/* Make the context visible again */
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	/* Free old lxt if reallocated */
	if (lxt != lxt_old)
		kfree(lxt_old);
	*new_size = my_new_size;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * _cxlflash_vlun_resize() - changes the size of a virtual LUN
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @ctxi:	Context owning resources.
 * @resize:	Resize ioctl data structure.
 *
 * On successful return, the user is informed of the new size (in blocks)
 * of the virtual LUN in last LBA format. When the size of the virtual
 * LUN is zero, the last LBA is reflected as -1. See comment in the
 * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
 * on the error recovery list.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_vlun_resize(struct scsi_device *sdev,
			  struct ctx_info *ctxi,
			  struct dk_cxlflash_resize *resize)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	res_hndl_t rhndl = resize->rsrc_handle;
	u64 new_size;
	u64 nsectors;
	u64 ctxid = DECODE_CTXID(resize->context_id),
	    rctxid = resize->context_id;

	struct sisl_rht_entry *rhte;

	int rc = 0;

	/*
	 * The requested size (req_size) is always assumed to be in 4k blocks,
	 * so we have to convert it here from 4k to chunk size.
	 */
	nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
	new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
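	/*
	 * E.g. (assuming a 4K device block size and an MC_CHUNK_SIZE of 256):
	 * req_size=300 gives nsectors=300 and new_size=2 chunks.
	 */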

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
		__func__, ctxid, resize->rsrc_handle, resize->req_size,
		new_size);

	if (unlikely(gli->mode != MODE_VIRTUAL)) {
		dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
			__func__, gli->mode);
		rc = -EINVAL;
		goto out;

	}

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	if (new_size > rhte->lxt_cnt)
		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
	else if (new_size < rhte->lxt_cnt)
		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
	else {
		/*
		 * Rare case where there is already sufficient space, just
		 * need to perform a translation sync with the AFU. This
		 * scenario likely follows a previous sync failure during
		 * a resize operation. Accordingly, perform the heavyweight
		 * form of translation sync as it is unknown which type of
		 * resize failed previously.
		 */
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc)) {
			rc = -EAGAIN;
			goto out;
		}
	}

	resize->hdr.return_flags = 0;
	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
	resize->last_lba /= CXLFLASH_BLOCK_SIZE;
	resize->last_lba--;

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
		__func__, resize->last_lba, rc);
	return rc;
}

int cxlflash_vlun_resize(struct scsi_device *sdev,
			 struct dk_cxlflash_resize *resize)
{
	return _cxlflash_vlun_resize(sdev, NULL, resize);
}

/**
 * cxlflash_restore_luntable() - Restore LUN table to prior state
 * @cfg:	Internal structure associated with the host.
 */
void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
{
	struct llun_info *lli, *temp;
	u32 lind;
	int k;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;

	mutex_lock(&global.mutex);

	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
		if (!lli->in_table)
			continue;

		lind = lli->lun_index;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);

		for (k = 0; k < cfg->num_fc_ports; k++)
			if (lli->port_sel & (1 << k)) {
				fc_port_luns = get_fc_port_luns(cfg, k);
				writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
				dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
			}
	}

	mutex_unlock(&global.mutex);
}

/**
 * get_num_ports() - compute number of ports from port selection mask
 * @psm:	Port selection mask.
 *
 * Return: Population count of port selection mask
 */
static inline u8 get_num_ports(u32 psm)
{
	static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
				     1, 2, 2, 3, 2, 3, 3, 4 };

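	/* bits[] is a 4-bit population count table, e.g. psm=0xa -> 2 ports */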
	return bits[psm & 0xf];
}

/**
 * init_luntable() - write an entry in the LUN table
 * @cfg:	Internal structure associated with the host.
 * @lli:	Per adapter LUN information structure.
 *
 * On successful return, a LUN table entry is created:
 *	- at the top for LUNs visible on multiple ports.
 *	- at the bottom for LUNs visible only on one port.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
{
	u32 chan;
	u32 lind;
	u32 nports;
	int rc = 0;
	int k;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;

	mutex_lock(&global.mutex);

	if (lli->in_table)
		goto out;

	nports = get_num_ports(lli->port_sel);
	if (nports == 0 || nports > cfg->num_fc_ports) {
		WARN(1, "Unsupported port configuration nports=%u", nports);
		rc = -EIO;
		goto out;
	}

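	/*
	 * promote_lun_index advances from one end of the LUN table while
	 * last_lun_index[port] moves in from the other; the two meeting
	 * means there are no free slots left (-ENOSPC below).
	 */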
907*4882a593Smuzhiyun 	if (nports > 1) {
908*4882a593Smuzhiyun 		/*
909*4882a593Smuzhiyun 		 * When LUN is visible from multiple ports, we will put
910*4882a593Smuzhiyun 		 * it in the top half of the LUN table.
911*4882a593Smuzhiyun 		 */
912*4882a593Smuzhiyun 		for (k = 0; k < cfg->num_fc_ports; k++) {
913*4882a593Smuzhiyun 			if (!(lli->port_sel & (1 << k)))
914*4882a593Smuzhiyun 				continue;
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 			if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
917*4882a593Smuzhiyun 				rc = -ENOSPC;
918*4882a593Smuzhiyun 				goto out;
919*4882a593Smuzhiyun 			}
920*4882a593Smuzhiyun 		}
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 		lind = lli->lun_index = cfg->promote_lun_index;
923*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 		for (k = 0; k < cfg->num_fc_ports; k++) {
926*4882a593Smuzhiyun 			if (!(lli->port_sel & (1 << k)))
927*4882a593Smuzhiyun 				continue;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 			fc_port_luns = get_fc_port_luns(cfg, k);
930*4882a593Smuzhiyun 			writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
931*4882a593Smuzhiyun 			dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
932*4882a593Smuzhiyun 		}
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 		cfg->promote_lun_index++;
935*4882a593Smuzhiyun 	} else {
936*4882a593Smuzhiyun 		/*
937*4882a593Smuzhiyun 		 * When LUN is visible only from one port, we will put
938*4882a593Smuzhiyun 		 * it in the bottom half of the LUN table.
939*4882a593Smuzhiyun 		 */
940*4882a593Smuzhiyun 		chan = PORTMASK2CHAN(lli->port_sel);
941*4882a593Smuzhiyun 		if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
942*4882a593Smuzhiyun 			rc = -ENOSPC;
943*4882a593Smuzhiyun 			goto out;
944*4882a593Smuzhiyun 		}
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 		lind = lli->lun_index = cfg->last_lun_index[chan];
947*4882a593Smuzhiyun 		fc_port_luns = get_fc_port_luns(cfg, chan);
948*4882a593Smuzhiyun 		writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
949*4882a593Smuzhiyun 		cfg->last_lun_index[chan]--;
950*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
951*4882a593Smuzhiyun 			__func__, lind, chan, lli->lun_id[chan]);
952*4882a593Smuzhiyun 	}
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	lli->in_table = true;
955*4882a593Smuzhiyun out:
956*4882a593Smuzhiyun 	mutex_unlock(&global.mutex);
957*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
958*4882a593Smuzhiyun 	return rc;
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun /**
962*4882a593Smuzhiyun  * cxlflash_disk_virtual_open() - open a virtual disk of specified size
963*4882a593Smuzhiyun  * @sdev:	SCSI device associated with LUN owning virtual LUN.
964*4882a593Smuzhiyun  * @arg:	UVirtual ioctl data structure.
965*4882a593Smuzhiyun  *
966*4882a593Smuzhiyun  * On successful return, the user is informed of the resource handle
967*4882a593Smuzhiyun  * to be used to identify the virtual LUN and the size (in blocks) of
968*4882a593Smuzhiyun  * the virtual LUN in last LBA format. When the size of the virtual LUN
969*4882a593Smuzhiyun  * is zero, the last LBA is reflected as -1.
970*4882a593Smuzhiyun  *
971*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
972*4882a593Smuzhiyun  */
cxlflash_disk_virtual_open(struct scsi_device * sdev,void * arg)973*4882a593Smuzhiyun int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
976*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
977*4882a593Smuzhiyun 	struct llun_info *lli = sdev->hostdata;
978*4882a593Smuzhiyun 	struct glun_info *gli = lli->parent;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
981*4882a593Smuzhiyun 	struct dk_cxlflash_resize resize;
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	u64 ctxid = DECODE_CTXID(virt->context_id),
984*4882a593Smuzhiyun 	    rctxid = virt->context_id;
985*4882a593Smuzhiyun 	u64 lun_size = virt->lun_size;
986*4882a593Smuzhiyun 	u64 last_lba = 0;
987*4882a593Smuzhiyun 	u64 rsrc_handle = -1;
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	int rc = 0;
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	struct ctx_info *ctxi = NULL;
992*4882a593Smuzhiyun 	struct sisl_rht_entry *rhte = NULL;
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	/* Setup the LUNs block allocator on first call */
997*4882a593Smuzhiyun 	mutex_lock(&gli->mutex);
998*4882a593Smuzhiyun 	if (gli->mode == MODE_NONE) {
999*4882a593Smuzhiyun 		rc = init_vlun(lli);
1000*4882a593Smuzhiyun 		if (rc) {
1001*4882a593Smuzhiyun 			dev_err(dev, "%s: init_vlun failed rc=%d\n",
1002*4882a593Smuzhiyun 				__func__, rc);
1003*4882a593Smuzhiyun 			rc = -ENOMEM;
1004*4882a593Smuzhiyun 			goto err0;
1005*4882a593Smuzhiyun 		}
1006*4882a593Smuzhiyun 	}
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
1009*4882a593Smuzhiyun 	if (unlikely(rc)) {
1010*4882a593Smuzhiyun 		dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
1011*4882a593Smuzhiyun 		goto err0;
1012*4882a593Smuzhiyun 	}
1013*4882a593Smuzhiyun 	mutex_unlock(&gli->mutex);
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	rc = init_luntable(cfg, lli);
1016*4882a593Smuzhiyun 	if (rc) {
1017*4882a593Smuzhiyun 		dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
1018*4882a593Smuzhiyun 		goto err1;
1019*4882a593Smuzhiyun 	}
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	ctxi = get_context(cfg, rctxid, lli, 0);
1022*4882a593Smuzhiyun 	if (unlikely(!ctxi)) {
1023*4882a593Smuzhiyun 		dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1024*4882a593Smuzhiyun 		rc = -EINVAL;
1025*4882a593Smuzhiyun 		goto err1;
1026*4882a593Smuzhiyun 	}
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	rhte = rhte_checkout(ctxi, lli);
1029*4882a593Smuzhiyun 	if (unlikely(!rhte)) {
1030*4882a593Smuzhiyun 		dev_err(dev, "%s: too many opens ctxid=%llu\n",
1031*4882a593Smuzhiyun 			__func__, ctxid);
1032*4882a593Smuzhiyun 		rc = -EMFILE;	/* too many opens  */
1033*4882a593Smuzhiyun 		goto err1;
1034*4882a593Smuzhiyun 	}
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	rsrc_handle = (rhte - ctxi->rht_start);
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	/* Populate RHT format 0 */
1039*4882a593Smuzhiyun 	rhte->nmask = MC_RHT_NMASK;
1040*4882a593Smuzhiyun 	rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	/* Resize even if requested size is 0 */
1043*4882a593Smuzhiyun 	marshal_virt_to_resize(virt, &resize);
1044*4882a593Smuzhiyun 	resize.rsrc_handle = rsrc_handle;
1045*4882a593Smuzhiyun 	rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
1046*4882a593Smuzhiyun 	if (rc) {
1047*4882a593Smuzhiyun 		dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
1048*4882a593Smuzhiyun 		goto err2;
1049*4882a593Smuzhiyun 	}
1050*4882a593Smuzhiyun 	last_lba = resize.last_lba;
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
1053*4882a593Smuzhiyun 		ctxi->rht_needs_ws[rsrc_handle] = true;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	virt->hdr.return_flags = 0;
1056*4882a593Smuzhiyun 	virt->last_lba = last_lba;
1057*4882a593Smuzhiyun 	virt->rsrc_handle = rsrc_handle;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	if (get_num_ports(lli->port_sel) > 1)
1060*4882a593Smuzhiyun 		virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
1061*4882a593Smuzhiyun out:
1062*4882a593Smuzhiyun 	if (likely(ctxi))
1063*4882a593Smuzhiyun 		put_context(ctxi);
1064*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
1065*4882a593Smuzhiyun 		__func__, rsrc_handle, rc, last_lba);
1066*4882a593Smuzhiyun 	return rc;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun err2:
1069*4882a593Smuzhiyun 	rhte_checkin(ctxi, rhte);
1070*4882a593Smuzhiyun err1:
1071*4882a593Smuzhiyun 	cxlflash_lun_detach(gli);
1072*4882a593Smuzhiyun 	goto out;
1073*4882a593Smuzhiyun err0:
1074*4882a593Smuzhiyun 	/* Special common cleanup prior to successful LUN attach */
1075*4882a593Smuzhiyun 	cxlflash_ba_terminate(&gli->blka.ba_lun);
1076*4882a593Smuzhiyun 	mutex_unlock(&gli->mutex);
1077*4882a593Smuzhiyun 	goto out;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun /**
1081*4882a593Smuzhiyun  * clone_lxt() - copies translation tables from source to destination RHTE
1082*4882a593Smuzhiyun  * @afu:	AFU associated with the host.
1083*4882a593Smuzhiyun  * @blka:	Block allocator associated with LUN.
1084*4882a593Smuzhiyun  * @ctxid:	Context ID of context owning the RHTE.
1085*4882a593Smuzhiyun  * @rhndl:	Resource handle associated with the RHTE.
1086*4882a593Smuzhiyun  * @rhte:	Destination resource handle entry (RHTE).
1087*4882a593Smuzhiyun  * @rhte_src:	Source resource handle entry (RHTE).
1088*4882a593Smuzhiyun  *
1089*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
1090*4882a593Smuzhiyun  */
clone_lxt(struct afu * afu,struct blka * blka,ctx_hndl_t ctxid,res_hndl_t rhndl,struct sisl_rht_entry * rhte,struct sisl_rht_entry * rhte_src)1091*4882a593Smuzhiyun static int clone_lxt(struct afu *afu,
1092*4882a593Smuzhiyun 		     struct blka *blka,
1093*4882a593Smuzhiyun 		     ctx_hndl_t ctxid,
1094*4882a593Smuzhiyun 		     res_hndl_t rhndl,
1095*4882a593Smuzhiyun 		     struct sisl_rht_entry *rhte,
1096*4882a593Smuzhiyun 		     struct sisl_rht_entry *rhte_src)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
1099*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1100*4882a593Smuzhiyun 	struct sisl_lxt_entry *lxt = NULL;
1101*4882a593Smuzhiyun 	bool locked = false;
1102*4882a593Smuzhiyun 	u32 ngrps;
1103*4882a593Smuzhiyun 	u64 aun;		/* chunk# allocated by block allocator */
1104*4882a593Smuzhiyun 	int j;
1105*4882a593Smuzhiyun 	int i = 0;
1106*4882a593Smuzhiyun 	int rc = 0;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	if (ngrps) {
1111*4882a593Smuzhiyun 		/* allocate new LXTs for clone */
1112*4882a593Smuzhiyun 		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
1113*4882a593Smuzhiyun 				GFP_KERNEL);
1114*4882a593Smuzhiyun 		if (unlikely(!lxt)) {
1115*4882a593Smuzhiyun 			rc = -ENOMEM;
1116*4882a593Smuzhiyun 			goto out;
1117*4882a593Smuzhiyun 		}
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 		/* Copy over the source translation entries */
1120*4882a593Smuzhiyun 		memcpy(lxt, rhte_src->lxt_start,
1121*4882a593Smuzhiyun 		       (sizeof(*lxt) * rhte_src->lxt_cnt));
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 		/* Clone the LBAs in the block allocator via ref_cnt. Note
1124*4882a593Smuzhiyun 		 * that the block allocator mutex must be held until it is
1125*4882a593Smuzhiyun 		 * established that this routine will complete without the
1126*4882a593Smuzhiyun 		 * need for a cleanup.
1127*4882a593Smuzhiyun 		 */
1128*4882a593Smuzhiyun 		mutex_lock(&blka->mutex);
1129*4882a593Smuzhiyun 		locked = true;
1130*4882a593Smuzhiyun 		for (i = 0; i < rhte_src->lxt_cnt; i++) {
1131*4882a593Smuzhiyun 			aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
1132*4882a593Smuzhiyun 			if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
1133*4882a593Smuzhiyun 				rc = -EIO;
1134*4882a593Smuzhiyun 				goto err;
1135*4882a593Smuzhiyun 			}
1136*4882a593Smuzhiyun 		}
1137*4882a593Smuzhiyun 	}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	/*
1140*4882a593Smuzhiyun 	 * The following sequence is prescribed in the SISlite spec
1141*4882a593Smuzhiyun 	 * for syncing up with the AFU when adding LXT entries.
1142*4882a593Smuzhiyun 	 */
1143*4882a593Smuzhiyun 	dma_wmb(); /* Make LXT updates visible */
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	rhte->lxt_start = lxt;
1146*4882a593Smuzhiyun 	dma_wmb(); /* Make RHT entry's LXT table update visible */
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	rhte->lxt_cnt = rhte_src->lxt_cnt;
1149*4882a593Smuzhiyun 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1152*4882a593Smuzhiyun 	if (unlikely(rc)) {
1153*4882a593Smuzhiyun 		rc = -EAGAIN;
1154*4882a593Smuzhiyun 		goto err2;
1155*4882a593Smuzhiyun 	}
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun out:
1158*4882a593Smuzhiyun 	if (locked)
1159*4882a593Smuzhiyun 		mutex_unlock(&blka->mutex);
1160*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1161*4882a593Smuzhiyun 	return rc;
1162*4882a593Smuzhiyun err2:
1163*4882a593Smuzhiyun 	/* Reset the RHTE */
1164*4882a593Smuzhiyun 	rhte->lxt_cnt = 0;
1165*4882a593Smuzhiyun 	dma_wmb(); /* Make RHT entry's LXT table size update visible */
1166*4882a593Smuzhiyun 	rhte->lxt_start = NULL;
1167*4882a593Smuzhiyun 	dma_wmb(); /* Make RHT entry's LXT table update visible */
1168*4882a593Smuzhiyun err:
1169*4882a593Smuzhiyun 	/* free the clones already made */
1170*4882a593Smuzhiyun 	for (j = 0; j < i; j++) {
1171*4882a593Smuzhiyun 		aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
1172*4882a593Smuzhiyun 		ba_free(&blka->ba_lun, aun);
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 	kfree(lxt);
1175*4882a593Smuzhiyun 	goto out;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun /**
1179*4882a593Smuzhiyun  * cxlflash_disk_clone() - clone a context by making a snapshot of another
1180*4882a593Smuzhiyun  * @sdev:	SCSI device associated with LUN owning virtual LUN.
1181*4882a593Smuzhiyun  * @clone:	Clone ioctl data structure.
1182*4882a593Smuzhiyun  *
1183*4882a593Smuzhiyun  * This routine effectively performs the cxlflash_disk_open operation for each
1184*4882a593Smuzhiyun  * in-use virtual resource in the source context. Note that the destination
1185*4882a593Smuzhiyun  * context must be in pristine state and cannot have any resource handles
1186*4882a593Smuzhiyun  * open at the time of the clone.
1187*4882a593Smuzhiyun  *
1188*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
1189*4882a593Smuzhiyun  */
1190*4882a593Smuzhiyun int cxlflash_disk_clone(struct scsi_device *sdev,
1191*4882a593Smuzhiyun 			struct dk_cxlflash_clone *clone)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1194*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1195*4882a593Smuzhiyun 	struct llun_info *lli = sdev->hostdata;
1196*4882a593Smuzhiyun 	struct glun_info *gli = lli->parent;
1197*4882a593Smuzhiyun 	struct blka *blka = &gli->blka;
1198*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
1199*4882a593Smuzhiyun 	struct dk_cxlflash_release release = { { 0 }, 0 };
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	struct ctx_info *ctxi_src = NULL,
1202*4882a593Smuzhiyun 			*ctxi_dst = NULL;
1203*4882a593Smuzhiyun 	struct lun_access *lun_access_src, *lun_access_dst;
1204*4882a593Smuzhiyun 	u32 perms;
1205*4882a593Smuzhiyun 	u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
1206*4882a593Smuzhiyun 	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
1207*4882a593Smuzhiyun 	    rctxid_src = clone->context_id_src,
1208*4882a593Smuzhiyun 	    rctxid_dst = clone->context_id_dst;
1209*4882a593Smuzhiyun 	int i, j;
1210*4882a593Smuzhiyun 	int rc = 0;
1211*4882a593Smuzhiyun 	bool found;
1212*4882a593Smuzhiyun 	LIST_HEAD(sidecar);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
1215*4882a593Smuzhiyun 		__func__, ctxid_src, ctxid_dst);
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* Do not clone yourself */
1218*4882a593Smuzhiyun 	if (unlikely(rctxid_src == rctxid_dst)) {
1219*4882a593Smuzhiyun 		rc = -EINVAL;
1220*4882a593Smuzhiyun 		goto out;
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	if (unlikely(gli->mode != MODE_VIRTUAL)) {
1224*4882a593Smuzhiyun 		rc = -EINVAL;
1225*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
1226*4882a593Smuzhiyun 			__func__, gli->mode);
1227*4882a593Smuzhiyun 		goto out;
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
1231*4882a593Smuzhiyun 	ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
1232*4882a593Smuzhiyun 	if (unlikely(!ctxi_src || !ctxi_dst)) {
1233*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
1234*4882a593Smuzhiyun 			__func__, ctxid_src, ctxid_dst);
1235*4882a593Smuzhiyun 		rc = -EINVAL;
1236*4882a593Smuzhiyun 		goto out;
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	/* Verify there is no open resource handle in the destination context */
1240*4882a593Smuzhiyun 	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
1241*4882a593Smuzhiyun 		if (ctxi_dst->rht_start[i].nmask != 0) {
1242*4882a593Smuzhiyun 			rc = -EINVAL;
1243*4882a593Smuzhiyun 			goto out;
1244*4882a593Smuzhiyun 		}
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	/* Clone LUN access list */
1247*4882a593Smuzhiyun 	list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
1248*4882a593Smuzhiyun 		found = false;
1249*4882a593Smuzhiyun 		list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
1250*4882a593Smuzhiyun 			if (lun_access_dst->sdev == lun_access_src->sdev) {
1251*4882a593Smuzhiyun 				found = true;
1252*4882a593Smuzhiyun 				break;
1253*4882a593Smuzhiyun 			}
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 		if (!found) {
1256*4882a593Smuzhiyun 			lun_access_dst = kzalloc(sizeof(*lun_access_dst),
1257*4882a593Smuzhiyun 						 GFP_KERNEL);
1258*4882a593Smuzhiyun 			if (unlikely(!lun_access_dst)) {
1259*4882a593Smuzhiyun 				dev_err(dev, "%s: lun_access allocation failed\n",
1260*4882a593Smuzhiyun 					__func__);
1261*4882a593Smuzhiyun 				rc = -ENOMEM;
1262*4882a593Smuzhiyun 				goto out;
1263*4882a593Smuzhiyun 			}
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 			*lun_access_dst = *lun_access_src;
1266*4882a593Smuzhiyun 			list_add(&lun_access_dst->list, &sidecar);
1267*4882a593Smuzhiyun 		}
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	if (unlikely(!ctxi_src->rht_out)) {
1271*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Nothing to clone\n", __func__);
1272*4882a593Smuzhiyun 		goto out_success;
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	/* User specified permission on attach */
1276*4882a593Smuzhiyun 	perms = ctxi_dst->rht_perms;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	/*
1279*4882a593Smuzhiyun 	 * Copy over checked-out RHT (and their associated LXT) entries by
1280*4882a593Smuzhiyun 	 * hand, stopping after we've copied all outstanding entries and
1281*4882a593Smuzhiyun 	 * cleaning up if the clone fails.
1282*4882a593Smuzhiyun 	 *
1283*4882a593Smuzhiyun 	 * Note: This loop is equivalent to performing cxlflash_disk_open and
1284*4882a593Smuzhiyun 	 * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
1285*4882a593Smuzhiyun 	 * account by attaching after each successful RHT entry clone. In the
1286*4882a593Smuzhiyun 	 * event that a clone failure is experienced, the LUN detach is handled
1287*4882a593Smuzhiyun 	 * via the cleanup performed by _cxlflash_disk_release.
1288*4882a593Smuzhiyun 	 */
1289*4882a593Smuzhiyun 	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
1290*4882a593Smuzhiyun 		if (ctxi_src->rht_out == ctxi_dst->rht_out)
1291*4882a593Smuzhiyun 			break;
1292*4882a593Smuzhiyun 		if (ctxi_src->rht_start[i].nmask == 0)
1293*4882a593Smuzhiyun 			continue;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 		/* Consume a destination RHT entry */
1296*4882a593Smuzhiyun 		ctxi_dst->rht_out++;
1297*4882a593Smuzhiyun 		ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
1298*4882a593Smuzhiyun 		ctxi_dst->rht_start[i].fp =
1299*4882a593Smuzhiyun 		    SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
1300*4882a593Smuzhiyun 		ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 		rc = clone_lxt(afu, blka, ctxid_dst, i,
1303*4882a593Smuzhiyun 			       &ctxi_dst->rht_start[i],
1304*4882a593Smuzhiyun 			       &ctxi_src->rht_start[i]);
1305*4882a593Smuzhiyun 		if (rc) {
1306*4882a593Smuzhiyun 			marshal_clone_to_rele(clone, &release);
1307*4882a593Smuzhiyun 			for (j = 0; j < i; j++) {
1308*4882a593Smuzhiyun 				release.rsrc_handle = j;
1309*4882a593Smuzhiyun 				_cxlflash_disk_release(sdev, ctxi_dst,
1310*4882a593Smuzhiyun 						       &release);
1311*4882a593Smuzhiyun 			}
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 			/* Put back the one we failed on */
1314*4882a593Smuzhiyun 			rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
1315*4882a593Smuzhiyun 			goto err;
1316*4882a593Smuzhiyun 		}
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 		cxlflash_lun_attach(gli, gli->mode, false);
1319*4882a593Smuzhiyun 	}
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun out_success:
1322*4882a593Smuzhiyun 	list_splice(&sidecar, &ctxi_dst->luns);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	/* fall through */
1325*4882a593Smuzhiyun out:
1326*4882a593Smuzhiyun 	if (ctxi_src)
1327*4882a593Smuzhiyun 		put_context(ctxi_src);
1328*4882a593Smuzhiyun 	if (ctxi_dst)
1329*4882a593Smuzhiyun 		put_context(ctxi_dst);
1330*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1331*4882a593Smuzhiyun 	return rc;
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun err:
1334*4882a593Smuzhiyun 	list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
1335*4882a593Smuzhiyun 		kfree(lun_access_src);
1336*4882a593Smuzhiyun 	goto out;
1337*4882a593Smuzhiyun }
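
/*
 * A minimal usage sketch (not part of the driver), assuming the
 * DK_CXLFLASH_VLUN_CLONE ioctl and struct dk_cxlflash_clone definitions
 * from the uapi header; clone_vluns() and its parameters are hypothetical
 * names used for illustration only. It shows how user space might drive
 * cxlflash_disk_clone() above, cloning every in-use virtual resource of a
 * source context into a pristine destination context.
 *
 *	#include <sys/ioctl.h>
 *	#include <scsi/cxlflash_ioctl.h>
 *
 *	static int clone_vluns(int fd, __u64 src_ctx, __u64 dst_ctx)
 *	{
 *		struct dk_cxlflash_clone clone = { 0 };
 *
 *		clone.hdr.version = DK_CXLFLASH_VERSION_0;
 *		clone.context_id_src = src_ctx;
 *		clone.context_id_dst = dst_ctx;
 *
 *		return ioctl(fd, DK_CXLFLASH_VLUN_CLONE, &clone);
 *	}
 */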
1338