xref: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/mthca/mthca_av.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3*4882a593Smuzhiyun  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun  * licenses.  You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun  * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun  * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun  * OpenIB.org BSD license below:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *     Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun  *     without modification, are permitted provided that the following
13*4882a593Smuzhiyun  *     conditions are met:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      - Redistributions of source code must retain the above
16*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun  *        disclaimer.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *      - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun  *        disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun  *        provided with the distribution.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun  * SOFTWARE.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #include <linux/string.h>
35*4882a593Smuzhiyun #include <linux/slab.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <rdma/ib_verbs.h>
38*4882a593Smuzhiyun #include <rdma/ib_cache.h>
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #include "mthca_dev.h"
41*4882a593Smuzhiyun 
/*
 * Tavor hardware encodings for the static-rate field of an address
 * vector (low bits of mthca_av.msg_sr).
 */
enum {
      MTHCA_RATE_TAVOR_FULL   = 0,	/* full port rate */
      MTHCA_RATE_TAVOR_1X     = 1,	/* 2.5 Gb/sec (1X SDR) */
      MTHCA_RATE_TAVOR_4X     = 2,	/* 10 Gb/sec (4X SDR) */
      MTHCA_RATE_TAVOR_1X_DDR = 3	/* 5 Gb/sec (1X DDR) */
};
48*4882a593Smuzhiyun 
/*
 * Arbel (mem-free) hardware encodings for the static-rate field:
 * power-of-two fractions of the current port rate.
 */
enum {
      MTHCA_RATE_MEMFREE_FULL    = 0,	/* full port rate */
      MTHCA_RATE_MEMFREE_QUARTER = 1,	/* port rate / 4 */
      MTHCA_RATE_MEMFREE_EIGHTH  = 2,	/* port rate / 8 */
      MTHCA_RATE_MEMFREE_HALF    = 3	/* port rate / 2 */
};
55*4882a593Smuzhiyun 
/*
 * Device-visible layout of a UD address vector; this structure is
 * copied as-is into on-HCA memory (see memcpy_toio() in
 * mthca_create_ah()), so field order and sizes must not change.
 */
struct mthca_av {
	__be32 port_pd;			/* port number (bits 31:24) | PD number */
	u8     reserved1;
	u8     g_slid;			/* GRH-present flag (bit 7) | source path bits */
	__be16 dlid;			/* destination LID */
	u8     reserved2;
	u8     gid_index;		/* (port - 1) * gid_table_len + sgid_index */
	u8     msg_sr;			/* max message size (bits 7:4) | static rate */
	u8     hop_limit;		/* GRH hop limit */
	__be32 sl_tclass_flowlabel;	/* SL (31:28) | tclass (27:20) | flow label (19:0) */
	__be32 dgid[4];			/* destination GID (16 bytes) */
};
68*4882a593Smuzhiyun 
/*
 * Translate an Arbel (mem-free) static-rate encoding into an IB rate
 * enum.  Each encoding is a power-of-two fraction of the port rate,
 * so just pick the shift and scale the port's rate multiplier.
 */
static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	unsigned int shift;

	switch (mthca_rate) {
	case MTHCA_RATE_MEMFREE_EIGHTH:
		shift = 3;
		break;
	case MTHCA_RATE_MEMFREE_QUARTER:
		shift = 2;
		break;
	case MTHCA_RATE_MEMFREE_HALF:
		shift = 1;
		break;
	case MTHCA_RATE_MEMFREE_FULL:
	default:
		shift = 0;
		break;
	}

	return mult_to_ib_rate(port_rate >> shift);
}
83*4882a593Smuzhiyun 
/*
 * Translate a Tavor static-rate encoding into an IB rate enum.  Tavor
 * knows only a few fixed rates; any other encoding means "use the
 * current port rate".
 */
static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
{
	if (mthca_rate == MTHCA_RATE_TAVOR_1X)
		return IB_RATE_2_5_GBPS;
	if (mthca_rate == MTHCA_RATE_TAVOR_1X_DDR)
		return IB_RATE_5_GBPS;
	if (mthca_rate == MTHCA_RATE_TAVOR_4X)
		return IB_RATE_10_GBPS;

	return mult_to_ib_rate(port_rate);
}
93*4882a593Smuzhiyun 
/*
 * Convert a hardware static-rate encoding from an AV on @port into an
 * IB rate enum, dispatching on the HCA family.
 */
enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port)
{
	u8 port_rate = dev->rate[port - 1];

	if (!mthca_is_memfree(dev))
		return tavor_rate_to_ib(mthca_rate, port_rate);

	/*
	 * Handle old Arbel FW: stat_rate_support == 0x3 with any
	 * non-zero encoding means 1X.
	 */
	if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
		return IB_RATE_2_5_GBPS;

	return memfree_rate_to_ib(mthca_rate, port_rate);
}
105*4882a593Smuzhiyun 
/*
 * Pick the Arbel (mem-free) static-rate encoding that throttles a
 * request for rate multiplier @req_rate down from the port's current
 * multiplier @cur_rate.  Returns MTHCA_RATE_MEMFREE_FULL (0) when no
 * throttling is needed.
 */
static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
{
	if (cur_rate <= req_rate)
		return 0;

	/*
	 * Guard the division below: a zero requested multiplier (e.g.
	 * from an unrecognized static rate) would otherwise divide by
	 * zero.  Throttle as hard as the hardware allows instead.
	 */
	if (!req_rate)
		return MTHCA_RATE_MEMFREE_EIGHTH;

	/*
	 * Inter-packet delay (IPD) to get from rate X down to a rate
	 * no more than Y is (X - 1) / Y.
	 */
	switch ((cur_rate - 1) / req_rate) {
	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
	case 2:
	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
	default: return MTHCA_RATE_MEMFREE_EIGHTH;
	}
}
123*4882a593Smuzhiyun 
/*
 * Map an IB static rate onto the nearest Tavor hardware encoding;
 * anything Tavor can't express becomes "full port rate".
 */
static u8 ib_rate_to_tavor(u8 static_rate)
{
	if (static_rate == IB_RATE_2_5_GBPS)
		return MTHCA_RATE_TAVOR_1X;
	if (static_rate == IB_RATE_5_GBPS)
		return MTHCA_RATE_TAVOR_1X_DDR;
	if (static_rate == IB_RATE_10_GBPS)
		return MTHCA_RATE_TAVOR_4X;

	return MTHCA_RATE_TAVOR_FULL;
}
133*4882a593Smuzhiyun 
/*
 * Compute the hardware static-rate encoding to put in an AV for
 * @static_rate (an IB rate enum, 0 = use port rate) on @port.
 */
u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port)
{
	u8 port_rate = dev->rate[port - 1];
	u8 rate;

	/* No throttling asked for, or nothing slower than the port rate. */
	if (!static_rate || ib_rate_to_mult(static_rate) >= port_rate)
		return 0;

	rate = mthca_is_memfree(dev) ?
		ib_rate_to_memfree(ib_rate_to_mult(static_rate), port_rate) :
		ib_rate_to_tavor(static_rate);

	/* Fall back to encoding 1 if the FW doesn't support this one. */
	if (!(dev->limits.stat_rate_support & (1 << rate)))
		rate = 1;

	return rate;
}
152*4882a593Smuzhiyun 
mthca_create_ah(struct mthca_dev * dev,struct mthca_pd * pd,struct rdma_ah_attr * ah_attr,struct mthca_ah * ah)153*4882a593Smuzhiyun int mthca_create_ah(struct mthca_dev *dev,
154*4882a593Smuzhiyun 		    struct mthca_pd *pd,
155*4882a593Smuzhiyun 		    struct rdma_ah_attr *ah_attr,
156*4882a593Smuzhiyun 		    struct mthca_ah *ah)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	u32 index = -1;
159*4882a593Smuzhiyun 	struct mthca_av *av = NULL;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	ah->type = MTHCA_AH_PCI_POOL;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
164*4882a593Smuzhiyun 		ah->av   = kmalloc(sizeof *ah->av, GFP_ATOMIC);
165*4882a593Smuzhiyun 		if (!ah->av)
166*4882a593Smuzhiyun 			return -ENOMEM;
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 		ah->type = MTHCA_AH_KMALLOC;
169*4882a593Smuzhiyun 		av       = ah->av;
170*4882a593Smuzhiyun 	} else if (!atomic_read(&pd->sqp_count) &&
171*4882a593Smuzhiyun 		 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
172*4882a593Smuzhiyun 		index = mthca_alloc(&dev->av_table.alloc);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 		/* fall back to allocate in host memory */
175*4882a593Smuzhiyun 		if (index == -1)
176*4882a593Smuzhiyun 			goto on_hca_fail;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 		av = kmalloc(sizeof *av, GFP_ATOMIC);
179*4882a593Smuzhiyun 		if (!av)
180*4882a593Smuzhiyun 			goto on_hca_fail;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 		ah->type = MTHCA_AH_ON_HCA;
183*4882a593Smuzhiyun 		ah->avdma  = dev->av_table.ddr_av_base +
184*4882a593Smuzhiyun 			index * MTHCA_AV_SIZE;
185*4882a593Smuzhiyun 	}
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun on_hca_fail:
188*4882a593Smuzhiyun 	if (ah->type == MTHCA_AH_PCI_POOL) {
189*4882a593Smuzhiyun 		ah->av = dma_pool_zalloc(dev->av_table.pool,
190*4882a593Smuzhiyun 					 GFP_ATOMIC, &ah->avdma);
191*4882a593Smuzhiyun 		if (!ah->av)
192*4882a593Smuzhiyun 			return -ENOMEM;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 		av = ah->av;
195*4882a593Smuzhiyun 	}
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	ah->key = pd->ntmr.ibmr.lkey;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	av->port_pd = cpu_to_be32(pd->pd_num |
200*4882a593Smuzhiyun 				  (rdma_ah_get_port_num(ah_attr) << 24));
201*4882a593Smuzhiyun 	av->g_slid  = rdma_ah_get_path_bits(ah_attr);
202*4882a593Smuzhiyun 	av->dlid    = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
203*4882a593Smuzhiyun 	av->msg_sr  = (3 << 4) | /* 2K message */
204*4882a593Smuzhiyun 		mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
205*4882a593Smuzhiyun 			       rdma_ah_get_port_num(ah_attr));
206*4882a593Smuzhiyun 	av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
207*4882a593Smuzhiyun 	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
208*4882a593Smuzhiyun 		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 		av->g_slid |= 0x80;
211*4882a593Smuzhiyun 		av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
212*4882a593Smuzhiyun 				  dev->limits.gid_table_len +
213*4882a593Smuzhiyun 				  grh->sgid_index;
214*4882a593Smuzhiyun 		av->hop_limit = grh->hop_limit;
215*4882a593Smuzhiyun 		av->sl_tclass_flowlabel |=
216*4882a593Smuzhiyun 			cpu_to_be32((grh->traffic_class << 20) |
217*4882a593Smuzhiyun 				    grh->flow_label);
218*4882a593Smuzhiyun 		memcpy(av->dgid, grh->dgid.raw, 16);
219*4882a593Smuzhiyun 	} else {
220*4882a593Smuzhiyun 		/* Arbel workaround -- low byte of GID must be 2 */
221*4882a593Smuzhiyun 		av->dgid[3] = cpu_to_be32(2);
222*4882a593Smuzhiyun 	}
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	if (0) {
225*4882a593Smuzhiyun 		int j;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
228*4882a593Smuzhiyun 			  av, (unsigned long) ah->avdma);
229*4882a593Smuzhiyun 		for (j = 0; j < 8; ++j)
230*4882a593Smuzhiyun 			printk(KERN_DEBUG "  [%2x] %08x\n",
231*4882a593Smuzhiyun 			       j * 4, be32_to_cpu(((__be32 *) av)[j]));
232*4882a593Smuzhiyun 	}
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	if (ah->type == MTHCA_AH_ON_HCA) {
235*4882a593Smuzhiyun 		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
236*4882a593Smuzhiyun 			    av, MTHCA_AV_SIZE);
237*4882a593Smuzhiyun 		kfree(av);
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	return 0;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun 
mthca_destroy_ah(struct mthca_dev * dev,struct mthca_ah * ah)243*4882a593Smuzhiyun int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun 	switch (ah->type) {
246*4882a593Smuzhiyun 	case MTHCA_AH_ON_HCA:
247*4882a593Smuzhiyun 		mthca_free(&dev->av_table.alloc,
248*4882a593Smuzhiyun 			   (ah->avdma - dev->av_table.ddr_av_base) /
249*4882a593Smuzhiyun 			   MTHCA_AV_SIZE);
250*4882a593Smuzhiyun 		break;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	case MTHCA_AH_PCI_POOL:
253*4882a593Smuzhiyun 		dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
254*4882a593Smuzhiyun 		break;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	case MTHCA_AH_KMALLOC:
257*4882a593Smuzhiyun 		kfree(ah->av);
258*4882a593Smuzhiyun 		break;
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	return 0;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
mthca_ah_grh_present(struct mthca_ah * ah)264*4882a593Smuzhiyun int mthca_ah_grh_present(struct mthca_ah *ah)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	return !!(ah->av->g_slid & 0x80);
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
mthca_read_ah(struct mthca_dev * dev,struct mthca_ah * ah,struct ib_ud_header * header)269*4882a593Smuzhiyun int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
270*4882a593Smuzhiyun 		  struct ib_ud_header *header)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun 	if (ah->type == MTHCA_AH_ON_HCA)
273*4882a593Smuzhiyun 		return -EINVAL;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	header->lrh.service_level   = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
276*4882a593Smuzhiyun 	header->lrh.destination_lid = ah->av->dlid;
277*4882a593Smuzhiyun 	header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
278*4882a593Smuzhiyun 	if (mthca_ah_grh_present(ah)) {
279*4882a593Smuzhiyun 		header->grh.traffic_class =
280*4882a593Smuzhiyun 			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
281*4882a593Smuzhiyun 		header->grh.flow_label    =
282*4882a593Smuzhiyun 			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
283*4882a593Smuzhiyun 		header->grh.hop_limit     = ah->av->hop_limit;
284*4882a593Smuzhiyun 		header->grh.source_gid = ah->ibah.sgid_attr->gid;
285*4882a593Smuzhiyun 		memcpy(header->grh.destination_gid.raw,
286*4882a593Smuzhiyun 		       ah->av->dgid, 16);
287*4882a593Smuzhiyun 	}
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	return 0;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
mthca_ah_query(struct ib_ah * ibah,struct rdma_ah_attr * attr)292*4882a593Smuzhiyun int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	struct mthca_ah *ah   = to_mah(ibah);
295*4882a593Smuzhiyun 	struct mthca_dev *dev = to_mdev(ibah->device);
296*4882a593Smuzhiyun 	u8 port_num = be32_to_cpu(ah->av->port_pd) >> 24;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	/* Only implement for MAD and memfree ah for now. */
299*4882a593Smuzhiyun 	if (ah->type == MTHCA_AH_ON_HCA)
300*4882a593Smuzhiyun 		return -ENOSYS;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	memset(attr, 0, sizeof *attr);
303*4882a593Smuzhiyun 	attr->type = ibah->type;
304*4882a593Smuzhiyun 	rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
305*4882a593Smuzhiyun 	rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
306*4882a593Smuzhiyun 	rdma_ah_set_port_num(attr, port_num);
307*4882a593Smuzhiyun 	rdma_ah_set_static_rate(attr,
308*4882a593Smuzhiyun 				mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
309*4882a593Smuzhiyun 						 port_num));
310*4882a593Smuzhiyun 	rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
311*4882a593Smuzhiyun 	if (mthca_ah_grh_present(ah)) {
312*4882a593Smuzhiyun 		u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 		rdma_ah_set_grh(attr, NULL,
315*4882a593Smuzhiyun 				tc_fl & 0xfffff,
316*4882a593Smuzhiyun 				ah->av->gid_index &
317*4882a593Smuzhiyun 				(dev->limits.gid_table_len - 1),
318*4882a593Smuzhiyun 				ah->av->hop_limit,
319*4882a593Smuzhiyun 				(tc_fl >> 20) & 0xff);
320*4882a593Smuzhiyun 		rdma_ah_set_dgid_raw(attr, ah->av->dgid);
321*4882a593Smuzhiyun 	}
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	return 0;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun 
mthca_init_av_table(struct mthca_dev * dev)326*4882a593Smuzhiyun int mthca_init_av_table(struct mthca_dev *dev)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	int err;
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	if (mthca_is_memfree(dev))
331*4882a593Smuzhiyun 		return 0;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	err = mthca_alloc_init(&dev->av_table.alloc,
334*4882a593Smuzhiyun 			       dev->av_table.num_ddr_avs,
335*4882a593Smuzhiyun 			       dev->av_table.num_ddr_avs - 1,
336*4882a593Smuzhiyun 			       0);
337*4882a593Smuzhiyun 	if (err)
338*4882a593Smuzhiyun 		return err;
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
341*4882a593Smuzhiyun 					     MTHCA_AV_SIZE,
342*4882a593Smuzhiyun 					     MTHCA_AV_SIZE, 0);
343*4882a593Smuzhiyun 	if (!dev->av_table.pool)
344*4882a593Smuzhiyun 		goto out_free_alloc;
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
347*4882a593Smuzhiyun 		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
348*4882a593Smuzhiyun 					       dev->av_table.ddr_av_base -
349*4882a593Smuzhiyun 					       dev->ddr_start,
350*4882a593Smuzhiyun 					       dev->av_table.num_ddr_avs *
351*4882a593Smuzhiyun 					       MTHCA_AV_SIZE);
352*4882a593Smuzhiyun 		if (!dev->av_table.av_map)
353*4882a593Smuzhiyun 			goto out_free_pool;
354*4882a593Smuzhiyun 	} else
355*4882a593Smuzhiyun 		dev->av_table.av_map = NULL;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	return 0;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun  out_free_pool:
360*4882a593Smuzhiyun 	dma_pool_destroy(dev->av_table.pool);
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun  out_free_alloc:
363*4882a593Smuzhiyun 	mthca_alloc_cleanup(&dev->av_table.alloc);
364*4882a593Smuzhiyun 	return -ENOMEM;
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun 
mthca_cleanup_av_table(struct mthca_dev * dev)367*4882a593Smuzhiyun void mthca_cleanup_av_table(struct mthca_dev *dev)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun 	if (mthca_is_memfree(dev))
370*4882a593Smuzhiyun 		return;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	if (dev->av_table.av_map)
373*4882a593Smuzhiyun 		iounmap(dev->av_table.av_map);
374*4882a593Smuzhiyun 	dma_pool_destroy(dev->av_table.pool);
375*4882a593Smuzhiyun 	mthca_alloc_cleanup(&dev->av_table.alloc);
376*4882a593Smuzhiyun }
377