/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate ICM in the largest chunks we can, up to a maximum of
 * 256 KB per chunk.  Note that the chunks are not necessarily in
 * contiguous physical memory.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
};

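/*
 * Free the pages backing a non-coherent chunk, DMA-unmapping the
 * scatterlist first if it was ever mapped (nsg > 0).
 */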
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
			     DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->sg[i]),
			     get_order(chunk->sg[i].length));
}

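/*
 * Free the buffers backing a coherent chunk; each entry came from
 * dma_alloc_coherent(), so freeing also drops the DMA mapping.
 */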
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->buf[i].size,
				  chunk->buf[i].addr,
				  chunk->buf[i].dma_addr);
}

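/*
 * Free a whole ICM area: release each chunk's memory according to how
 * it was allocated, then the chunk and icm structures themselves.
 */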
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

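/*
 * Allocate one block of 2^order pages for a scatterlist entry,
 * preferring the requested NUMA node and falling back to any node.
 */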
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

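/*
 * Allocate one coherent buffer of 2^order pages.  The buffer must be
 * page aligned; if the DMA layer returns an unaligned address, the
 * buffer is released and the allocation treated as a failure.
 */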
static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
				   int order, gfp_t gfp_mask)
{
	buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &buf->dma_addr, gfp_mask);
	if (!buf->addr)
		return -ENOMEM;

	if (offset_in_page(buf->addr)) {
		dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
				  buf->dma_addr);
		return -ENOMEM;
	}

	buf->size = PAGE_SIZE << order;
	return 0;
}

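/*
 * Allocate an ICM area covering npages pages as a list of chunks.
 * Blocks start at MLX4_ICM_ALLOC_SIZE and the order is lowered whenever
 * an allocation fails, down to order 0.  Non-coherent chunks are
 * DMA-mapped as they fill up, with a final mapping for a trailing
 * partial chunk.  On any failure everything is freed and NULL returned.
 */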
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	gfp_t mask;
	int ret;

	/* Coherent allocs return kernel virtual addresses, so highmem makes no sense */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kzalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kzalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}
			chunk->coherent = coherent;

			if (!coherent)
				sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		mask = gfp_mask;
		if (cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						&chunk->buf[chunk->npages],
						cur_order, mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
						   cur_order, mask,
						   dev->numa_node);

		if (ret) {
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
						chunk->sg, chunk->npages,
						DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
					chunk->npages, DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

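/* Firmware command: map an ICM area at the given device virtual address. */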
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

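/* Firmware command: unmap page_count ICM pages starting at virt. */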
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

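/* Firmware command: map the ICM auxiliary area (no caller-chosen address). */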
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

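/* Firmware command: unmap the ICM auxiliary area. */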
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

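/*
 * Ensure the table chunk covering obj is allocated and mapped, taking
 * a reference on it.  The chunk index is derived from the object's
 * position within the table.
 */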
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

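/*
 * Drop a reference on the table chunk covering obj; the last reference
 * unmaps and frees the chunk.
 */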
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

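/*
 * Look up the kernel virtual address of obj in a lowmem table and,
 * if dma_handle is non-NULL, its DMA address as well.  Walks the ICM
 * chunks until the block containing the object's byte offset is found.
 */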
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
			dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	void *addr = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			dma_addr_t dma_addr;
			size_t len;

			if (table->coherent) {
				len = chunk->buf[i].size;
				dma_addr = chunk->buf[i].dma_addr;
				addr = chunk->buf[i].addr;
			} else {
				struct page *page;

				len = sg_dma_len(&chunk->sg[i]);
				dma_addr = sg_dma_address(&chunk->sg[i]);

				/* XXX: we should never do this for highmem
				 * allocation.  This function either needs
				 * to be split, or the kernel virtual address
				 * return needs to be made optional.
				 */
				page = sg_page(&chunk->sg[i]);
				addr = lowmem_page_address(page);
			}

			if (dma_handle && dma_offset >= 0) {
				if (len > dma_offset)
					*dma_handle = dma_addr + dma_offset;
				dma_offset -= len;
			}

			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned.
			 */
			if (len > offset)
				goto out;
			offset -= len;
		}
	}

	addr = NULL;
out:
	mutex_unlock(&table->mutex);
	return addr ? addr + offset : NULL;
}

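/*
 * Take a reference on every chunk covering objects [start, end],
 * releasing any already-taken references on failure.
 */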
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

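/* Release the references taken by mlx4_table_get_range(). */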
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

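/*
 * Initialize an ICM table: allocate the chunk pointer array, then
 * pre-allocate and map the chunks backing the first 'reserved' objects,
 * pinning them with an extra reference so they are never freed.
 */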
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned int chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	if (WARN_ON(!obj_per_chunk))
		return -EINVAL;
	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	table->icm      = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
					i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kvfree(table->icm);

	return -ENOMEM;
}

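/* Unmap and free every remaining chunk of a table, then the chunk array. */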
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kvfree(table->icm);
}