// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* Copyright (c) 2020 Marvell International Ltd. */

#include <linux/dma-mapping.h>
#include <linux/qed/qed_chain.h>
#include <linux/vmalloc.h>

#include "qed_dev_api.h"
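
/**
 * qed_chain_init() - Initialize the bookkeeping fields of a chain.
 *
 * @chain: Chain to initialize.
 * @params: Chain initialization parameters.
 * @page_cnt: Number of pages backing the chain.
 *
 * Derives the per-page element counts, index masks, capacity and size
 * from @params, and records an external PBL if one was supplied.
 */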
static void qed_chain_init(struct qed_chain *chain,
			   const struct qed_chain_init_params *params,
			   u32 page_cnt)
{
	memset(chain, 0, sizeof(*chain));

	chain->elem_size = params->elem_size;
	chain->intended_use = params->intended_use;
	chain->mode = params->mode;
	chain->cnt_type = params->cnt_type;

	chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
					      params->page_size);
	chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->page_size,
						       params->mode);
	chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
						       params->mode);

	chain->elem_per_page_mask = chain->elem_per_page - 1;
	chain->next_page_mask = chain->usable_per_page &
				chain->elem_per_page_mask;

	chain->page_size = params->page_size;
	chain->page_cnt = page_cnt;
	chain->capacity = chain->usable_per_page * page_cnt;
	chain->size = chain->elem_per_page * page_cnt;

	if (params->ext_pbl_virt) {
		chain->pbl_sp.table_virt = params->ext_pbl_virt;
		chain->pbl_sp.table_phys = params->ext_pbl_phys;

		chain->b_external_pbl = true;
	}
}
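
/**
 * qed_chain_init_next_ptr_elem() - Link a page to its successor.
 *
 * @chain: Chain being set up.
 * @virt_curr: Virtual address of the current page.
 * @virt_next: Virtual address of the next page.
 * @phys_next: DMA address of the next page.
 *
 * Writes the next-ptr element that sits right after the usable
 * elements of the current page.
 */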
static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
					 void *virt_curr, void *virt_next,
					 dma_addr_t phys_next)
{
	struct qed_chain_next *next;
	u32 size;

	size = chain->elem_size * chain->usable_per_page;
	next = virt_curr + size;

	DMA_REGPAIR_LE(next->next_phys, phys_next);
	next->next_virt = virt_next;
}
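
/**
 * qed_chain_init_mem() - Record the addresses of the chain's first page.
 *
 * @chain: Chain to update.
 * @virt_addr: Virtual address of the first page (NULL when freeing).
 * @phys_addr: DMA address of the first page.
 */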
static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
			       dma_addr_t phys_addr)
{
	chain->p_virt_addr = virt_addr;
	chain->p_phys_addr = phys_addr;
}
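
/**
 * qed_chain_free_next_ptr() - Free the pages of a next-ptr mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 *
 * Walks the pages through their next-ptr elements, freeing each one in
 * turn; stops early at a NULL page left by a failed allocation.
 */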
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct qed_chain_next *next;
	dma_addr_t phys, phys_next;
	void *virt, *virt_next;
	u32 size, i;

	size = chain->elem_size * chain->usable_per_page;
	virt = chain->p_virt_addr;
	phys = chain->p_phys_addr;

	for (i = 0; i < chain->page_cnt; i++) {
		if (!virt)
			break;

		next = virt + size;
		virt_next = next->next_virt;
		phys_next = HILO_DMA_REGPAIR(next->next_phys);

		dma_free_coherent(dev, chain->page_size, virt, phys);

		virt = virt_next;
		phys = phys_next;
	}
}
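
/**
 * qed_chain_free_single() - Free the one page of a single mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */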
static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	if (!chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev, chain->page_size,
			  chain->p_virt_addr, chain->p_phys_addr);
}
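
/**
 * qed_chain_free_pbl() - Free the pages and tables of a PBL mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 *
 * Frees every page recorded in the address table, then the PBL table
 * itself (unless it was supplied externally), then the address table.
 */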
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *entry;
	u32 i;

	if (!chain->pbl.pp_addr_tbl)
		return;

	for (i = 0; i < chain->page_cnt; i++) {
		entry = chain->pbl.pp_addr_tbl + i;
		if (!entry->virt_addr)
			break;

		dma_free_coherent(dev, chain->page_size, entry->virt_addr,
				  entry->dma_map);
	}

	if (!chain->b_external_pbl)
		dma_free_coherent(dev, chain->pbl_sp.table_size,
				  chain->pbl_sp.table_virt,
				  chain->pbl_sp.table_phys);

	vfree(chain->pbl.pp_addr_tbl);
	chain->pbl.pp_addr_tbl = NULL;
}

/**
 * qed_chain_free() - Free chain DMA memory.
 *
 * @cdev: Main device structure.
 * @chain: Chain to free.
 */
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
{
	switch (chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, chain);
		break;
	default:
		return;
	}

	qed_chain_init_mem(chain, NULL, 0);
}
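
/**
 * qed_chain_alloc_sanity_check() - Validate the requested chain size.
 *
 * @cdev: Main device structure.
 * @params: Chain initialization parameters.
 * @page_cnt: Number of pages the chain would occupy.
 *
 * Return: 0 if the resulting element count fits the chain's counter
 * type, negative errno otherwise.
 */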
static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     const struct qed_chain_init_params *params,
			     u32 page_cnt)
{
	u64 chain_size;

	chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
	chain_size *= page_cnt;

	if (!chain_size)
		return -EINVAL;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested number of elements to whole pages,
	 * and after taking into account the unusable elements (next-ptr
	 * elements). The size of a "u16" chain can be (U16_MAX + 1) since the
	 * chain size/capacity fields are of u32 type.
	 */
	switch (params->cnt_type) {
	case QED_CHAIN_CNT_TYPE_U16:
		if (chain_size > U16_MAX + 1)
			break;

		return 0;
	case QED_CHAIN_CNT_TYPE_U32:
		if (chain_size > U32_MAX)
			break;

		return 0;
	default:
		return -EINVAL;
	}

	DP_NOTICE(cdev,
		  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
		  chain_size);

	return -EINVAL;
}
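
/**
 * qed_chain_alloc_next_ptr() - Allocate pages for a next-ptr mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to populate.
 *
 * Allocates one DMA-coherent page per chain page, linking each page to
 * the following one and the last page back to the first.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */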
static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	void *virt, *virt_prev = NULL;
	dma_addr_t phys;
	u32 i;

	for (i = 0; i < chain->page_cnt; i++) {
		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		} else {
			qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
						     phys);
		}

		virt_prev = virt;
	}

	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
				     chain->p_phys_addr);

	return 0;
}
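
/**
 * qed_chain_alloc_single() - Allocate the one page of a single mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to populate.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */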
static int qed_chain_alloc_single(struct qed_dev *cdev,
				  struct qed_chain *chain)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init_mem(chain, virt, phys);
	qed_chain_reset(chain);

	return 0;
}
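
/**
 * qed_chain_alloc_pbl() - Allocate the pages and tables of a PBL mode chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to populate.
 *
 * Allocates the page address table, the PBL table (unless an external
 * one was provided) and the data pages, recording each page's DMA
 * address in the PBL and its virtual address in the address table.
 *
 * Return: 0 on success, negative errno otherwise. Partial allocations
 * are reclaimed by qed_chain_free() in the caller.
 */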
static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
{
	struct device *dev = &cdev->pdev->dev;
	struct addr_tbl_entry *addr_tbl;
	dma_addr_t phys, pbl_phys;
	__le64 *pbl_virt;
	u32 page_cnt, i;
	size_t size;
	void *virt;

	page_cnt = chain->page_cnt;

	size = array_size(page_cnt, sizeof(*addr_tbl));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	addr_tbl = vzalloc(size);
	if (!addr_tbl)
		return -ENOMEM;

	chain->pbl.pp_addr_tbl = addr_tbl;

	if (chain->b_external_pbl) {
		pbl_virt = chain->pbl_sp.table_virt;
		goto alloc_pages;
	}

	size = array_size(page_cnt, sizeof(*pbl_virt));
	if (unlikely(size == SIZE_MAX))
		return -EOVERFLOW;

	pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
	if (!pbl_virt)
		return -ENOMEM;

	chain->pbl_sp.table_virt = pbl_virt;
	chain->pbl_sp.table_phys = pbl_phys;
	chain->pbl_sp.table_size = size;

alloc_pages:
	for (i = 0; i < page_cnt; i++) {
		virt = dma_alloc_coherent(dev, chain->page_size, &phys,
					  GFP_KERNEL);
		if (!virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(chain, virt, phys);
			qed_chain_reset(chain);
		}

		/* Fill the PBL table with the physical address of the page */
		pbl_virt[i] = cpu_to_le64(phys);

		/* Keep the virtual address of the page */
		addr_tbl[i].virt_addr = virt;
		addr_tbl[i].dma_map = phys;
	}

	return 0;
}

/**
 * qed_chain_alloc() - Allocate and initialize a chain.
 *
 * @cdev: Main device structure.
 * @chain: Chain to be processed.
 * @params: Chain initialization parameters.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
		    struct qed_chain_init_params *params)
{
	u32 page_cnt;
	int rc;

	if (!params->page_size)
		params->page_size = QED_CHAIN_PAGE_SIZE;

	if (params->mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
					      params->elem_size,
					      params->page_size,
					      params->mode);

	rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
			  params->intended_use, params->mode, params->cnt_type,
			  params->num_elems, params->elem_size,
			  params->page_size);
		return rc;
	}

	qed_chain_init(chain, params, page_cnt);

	switch (params->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, chain);
		break;
	default:
		return -EINVAL;
	}

	if (!rc)
		return 0;

	qed_chain_free(cdev, chain);

	return rc;
}
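
/*
 * Example usage (a minimal sketch, not taken from an actual caller;
 * "union my_elem" is a hypothetical element type): allocating a
 * 256-element PBL chain and freeing it again might look like this:
 *
 *	struct qed_chain_init_params params = {
 *		.mode		= QED_CHAIN_MODE_PBL,
 *		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
 *		.num_elems	= 256,
 *		.elem_size	= sizeof(union my_elem),
 *	};
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, &chain, &params);
 *	if (rc)
 *		return rc;
 *	...
 *	qed_chain_free(cdev, &chain);
 *
 * page_size may be left zero; qed_chain_alloc() then falls back to
 * QED_CHAIN_PAGE_SIZE.
 */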