/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
 /************************************************************/
/* This file supports the handling of the Alias GUID feature. */
/************************************************************/
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_pack.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <rdma/ib_user_verbs.h>
#include <linux/delay.h>
#include "mlx4_ib.h"

/*
 * The driver keeps the current state of all GUIDs as they are in the HW.
 * Whenever we receive an SMP MAD GUIDInfo record, the data is cached.
 */

struct mlx4_alias_guid_work_context {
	u8 port;
	struct mlx4_ib_dev     *dev;
	struct ib_sa_query     *sa_query;
	struct completion	done;
	int			query_id;
	struct list_head	list;
	int			block_num;
	ib_sa_comp_mask		guid_indexes;
	u8			method;
};

struct mlx4_next_alias_guid_work {
	u8 port;
	u8 block_num;
	u8 method;
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};

static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec);

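/*
 * Update the local GUID cache from a GUIDInfo record received for
 * @block_num on @port_num.  Only entries whose bit is set in the
 * record's guid_indexes mask are copied into the per-slave cache.
 */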
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id;
	int port_index = port_num - 1;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* The location of the specific index starts from bit number 4
		 * until bit num 11 */
		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
			if (slave_id >= dev->dev->num_slaves) {
				pr_debug("The last slave: %d\n", slave_id);
				return;
			}

			/* cache the guid: */
			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
			       &p_data[i * GUID_REC_SIZE],
			       GUID_REC_SIZE);
		} else
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
	}
}

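/* Return the cached alias GUID for @index on @port, or all-ones on a bad index. */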
static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
{
	if (index >= NUM_ALIAS_GUID_PER_PORT) {
		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
		return (__force __be64) -1;
	}
	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
}


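/*
 * Convert a GUID index within a record (0..7) to the corresponding
 * SA GUIDInfo component-mask bit (bits 4..11).
 */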
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
{
	return IB_SA_COMP_MASK(4 + index);
}

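/*
 * Handle a slave initialization state change: when a slave goes down,
 * schedule deletion of its alias GUID from the SM; when it comes up,
 * schedule (re)registration of its admin GUID.  The record is marked
 * IDLE and set to run immediately, then the per-port work is kicked.
 */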
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port,  int slave_init)
{
	__be64 curr_guid, required_guid;
	int record_num = slave / 8;
	int index = slave % 8;
	int port_index = port - 1;
	unsigned long flags;
	int do_work = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
	    GUID_STATE_NEED_PORT_INIT)
		goto unlock;
	if (!slave_init) {
		curr_guid = *(__be64 *)&dev->sriov.
			alias_guid.ports_guid[port_index].
			all_rec_per_port[record_num].
			all_recs[GUID_REC_SIZE * index];
		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
		    !curr_guid)
			goto unlock;
		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
	} else {
		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			goto unlock;
	}
	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		all_recs[GUID_REC_SIZE * index] = required_guid;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].guid_indexes
		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	/* set to run immediately */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].time_to_run = 0;
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		guids_retry_schedule[index] = 0;
	do_work = 1;
unlock:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);

	if (do_work)
		mlx4_ib_init_alias_guid_work(dev, port_index);
}

/*
 * Whenever a new GUID is set/unset (guid table change), create an event and
 * notify the relevant slave (the master should also be notified).
 * If the GUID value is not as we have in the cache, the slave will not be
 * updated; in this case it waits for smp_snoop or the port management
 * event to call the function again and update the slave.
 * block_num - the index of the block (16 blocks available)
 * port_num - 1 or 2
 */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id, slave_port;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags;
	__be64 required_value;

	if (!mlx4_is_master(dev->dev))
		return;

	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
			all_rec_per_port[block_num];
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num, guid_indexes);

	/* calculate the slaves and notify them */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->persist->num_vfs + 1)
			return;

		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
		if (slave_port < 0) /* this port isn't available for the VF */
			continue;

		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if the guid is the same as in the cache.
		 * If it is different, wait for smp_snoop or the port mgmt
		 * change event to update the slave on its port state change.
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;

		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];

		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			required_value = 0;

		if (tmp_cur_ag == required_value) {
			rec->guid_indexes = rec->guid_indexes &
			       ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
		} else {
			/* may notify port down if value is 0 */
			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
				spin_unlock_irqrestore(&dev->sriov.
					alias_guid.ag_work_lock, flags);
				continue;
			}
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
				       flags);
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
		/* 2 cases: valid GUID, and invalid GUID */

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /* valid GUID */
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev,
							       slave_id,
							       port_num,
							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
			}
		}
	}
}

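/*
 * Completion handler for the SA GUIDInfo query.  Compares the GUIDs the SM
 * returned with the values the driver requested, keeps the entries that were
 * accepted, schedules a retry (doubling the per-entry delay, capped at 60
 * seconds) for the ones that were declined, and re-queues the per-port
 * alias GUID work.
 */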
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;
	ib_sa_comp_mask declined_guid_indexes = 0;
	ib_sa_comp_mask applied_guid_indexes = 0;
	unsigned int resched_delay_sec = 0;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		rec->time_to_run = ktime_get_boottime_ns() + 1 * NSEC_PER_SEC;
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 sm_response, required_val;

		if (!(cb_ctx->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;
		sm_response = *(__be64 *)&guid_rec->guid_info_list
				[i * GUID_REC_SIZE];
		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
			if (required_val ==
			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
				goto next_entry;

			/* A new value was set while we waited for the response */
			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
				 be64_to_cpu(required_val),
				 i, guid_rec->block_num);
			goto entry_declined;
		}

		/* If the SM didn't assign one of the records,
		 * ask for it again.
		 */
		if (sm_response == MLX4_NOT_SET_GUID) {
			if (rec->guids_retry_schedule[i] == 0)
				mlx4_ib_warn(&dev->ib_dev,
					     "%s:Record num %d in  block_num: %d was declined by SM\n",
					     __func__, i,
					     guid_rec->block_num);
			goto entry_declined;
		} else {
		       /* Properly assigned record. */
		       /* We save the GUID we just got from the SM as the
			* admin_guid so that it is persistent; the next
			* request to the SM will ask for the same GUID.
			*/
			if (required_val &&
			    sm_response != required_val) {
				/* Warn only on first retry */
				if (rec->guids_retry_schedule[i] == 0)
					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						      __func__, i,
						     guid_rec->block_num,
						     be64_to_cpu(required_val),
						     be64_to_cpu(sm_response));
				goto entry_declined;
			} else {
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					sm_response;
				if (required_val == 0)
					mlx4_set_admin_guid(dev->dev,
							    sm_response,
							    (guid_rec->block_num
							    * NUM_ALIAS_GUID_IN_REC) + i,
							    cb_ctx->port);
				goto next_entry;
			}
		}
entry_declined:
		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
		rec->guids_retry_schedule[i] =
			(rec->guids_retry_schedule[i] == 0) ?  1 :
			min((unsigned int)60,
			    rec->guids_retry_schedule[i] * 2);
		/* use the minimum value among all entries in that record */
		resched_delay_sec = (resched_delay_sec == 0) ?
				rec->guids_retry_schedule[i] :
				min(resched_delay_sec,
				    rec->guids_retry_schedule[i]);
		continue;

next_entry:
		rec->guids_retry_schedule[i] = 0;
	}

	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
	if (declined_guid_indexes ||
	    rec->guid_indexes & ~(applied_guid_indexes)) {
		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
			 guid_rec->block_num,
			 be64_to_cpu((__force __be64)rec->guid_indexes),
			 be64_to_cpu((__force __be64)applied_guid_indexes),
			 be64_to_cpu((__force __be64)declined_guid_indexes));
		rec->time_to_run = ktime_get_boottime_ns() +
			resched_delay_sec * NSEC_PER_SEC;
	} else {
		rec->status = MLX4_GUID_INFO_STATUS_SET;
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	/*
	 * The function is called here to handle the case where the SM
	 * doesn't send an SMP; based on the SA response the driver
	 * notifies the slave.
	 */
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		get_low_record_time_index(dev, port_index, &resched_delay_sec);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work,
				   msecs_to_jiffies(resched_delay_sec * 1000));
	}
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

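/*
 * Mark one GUID record on @port as needing (re)configuration by the SM:
 * build the comp_mask of entries that must be re-requested and, if any
 * are pending, move the record back to IDLE so the alias GUID work
 * picks it up.
 */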
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	int i;
	u64 cur_admin_val;
	ib_sa_comp_mask comp_mask = 0;

	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
		= MLX4_GUID_INFO_STATUS_SET;

	/* calculate the comp_mask for that record. */
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val =
			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
		/*
		 * Check the admin value: if it's for delete (~00LL), or
		 * it is the first guid of the first record (hw guid), or
		 * the record is not owned by the sysadmin and the SM doesn't
		 * need to assign GUIDs, then don't put it up for assignment.
		 */
		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
		    (!index && !i))
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}
	dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].guid_indexes |= comp_mask;
	if (dev->sriov.alias_guid.ports_guid[port - 1].
	    all_rec_per_port[index].guid_indexes)
		dev->sriov.alias_guid.ports_guid[port - 1].
		all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;

}

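/*
 * Send a GUIDInfo SET/DELETE request to the SA for one record.  If the
 * port is not active or the query cannot be issued, the record is
 * invalidated and the work is rescheduled.
 */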
static int set_guid_rec(struct ib_device *ibdev,
			struct mlx4_next_alias_guid_work *rec)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	u8 port = rec->port + 1;
	int index = rec->block_num;
	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	memset(&attr, 0, sizeof(attr));
	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/* check that the port was configured by the SM, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;
	callback_context->guid_indexes = rec_det->guid_indexes;
	callback_context->method = rec->method;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = ib_lid_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}

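/*
 * Seed the per-port alias GUID records with the administratively assigned
 * GUIDs of all currently active slaves (entry 0, the HW GUID, is skipped).
 */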
static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
{
	int j, k, entry;
	__be64 guid;

	/* Check if the SM doesn't need to assign the GUIDs */
	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
			entry = j * NUM_ALIAS_GUID_IN_REC + k;
			/* no request for the 0 entry (hw guid) */
			if (!entry || entry > dev->dev->persist->num_vfs ||
			    !mlx4_is_slave_active(dev->dev, entry))
				continue;
			guid = mlx4_get_admin_guid(dev->dev, entry, port);
			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
				all_rec_per_port[j].all_recs
				[GUID_REC_SIZE * k] = guid;
			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
				 entry,
				 be64_to_cpu(guid),
				 port);
		}
	}
}
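
/*
 * Invalidate all alias GUID records of @port (performing the one-time port
 * init if it is still pending) and kick the per-port work so the GUID
 * table is renegotiated with the SM.
 */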
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
	int i;
	unsigned long flags, flags1;

	pr_debug("port %d\n", port);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);

	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
		GUID_STATE_NEED_PORT_INIT) {
		mlx4_ib_guid_port_init(dev, port);
		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
			(~GUID_STATE_NEED_PORT_INIT);
	}
	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
		invalidate_guid_record(dev, port, i);

	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
		/*
		 * Make sure no work waits in the queue.  If the work is
		 * already queued (not on the timer) the cancel will fail;
		 * that is not a problem because we just want the work started.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.
				      ports_guid[port - 1].alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

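/*
 * Build the next SA request for one record: split the pending entries into
 * SET vs DELETE masks and choose the method based on the entry with the
 * lowest retry time.
 */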
static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
				struct mlx4_next_alias_guid_work *next_rec,
				int record_index)
{
	int i;
	int lowset_time_entry = -1;
	int lowest_time = 0;
	ib_sa_comp_mask delete_guid_indexes = 0;
	ib_sa_comp_mask set_guid_indexes = 0;
	struct mlx4_sriov_alias_guid_info_rec_det *rec =
			&dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[record_index];

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		if (!(rec->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;

		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			delete_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);
		else
			set_guid_indexes |=
				mlx4_ib_get_aguid_comp_mask_from_ix(i);

		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
			lowest_time) {
			lowset_time_entry = i;
			lowest_time = rec->guids_retry_schedule[i];
		}
	}

	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
	next_rec->port = port;
	next_rec->block_num = record_index;

	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
		next_rec->rec_det.guid_indexes = delete_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
	} else {
		next_rec->rec_det.guid_indexes = set_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
	}
}

/* return index of record that should be updated based on lowest
 * rescheduled time
 */
static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
				     int *resched_delay_sec)
{
	int record_index = -1;
	u64 low_record_time = 0;
	struct mlx4_sriov_alias_guid_info_rec_det rec;
	int j;

	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
		rec = dev->sriov.alias_guid.ports_guid[port].
			all_rec_per_port[j];
		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
		    rec.guid_indexes) {
			if (record_index == -1 ||
			    rec.time_to_run < low_record_time) {
				record_index = j;
				low_record_time = rec.time_to_run;
			}
		}
	}
	if (resched_delay_sec) {
		u64 curr_time = ktime_get_boottime_ns();

		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
	}

	return record_index;
}

/* The function returns the next record that was
 * not configured (or failed to be configured) */
static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
				     struct mlx4_next_alias_guid_work *rec)
{
	unsigned long flags;
	int record_index;
	int ret = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	record_index = get_low_record_time_index(dev, port, NULL);

	if (record_index < 0) {
		ret = -ENOENT;
		goto out;
	}

	set_required_record(dev, port, rec, record_index);
out:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	return ret;
}
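
/*
 * Per-port delayed work: pick the next record that still needs to be
 * configured and send the corresponding SA request.
 */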
static void alias_guid_work(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	int ret = 0;
	struct mlx4_next_alias_guid_work *rec;
	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
			     alias_guid_work);
	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
						struct mlx4_ib_sriov,
						alias_guid);
	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);

	rec = kzalloc(sizeof *rec, GFP_KERNEL);
	if (!rec)
		return;

	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
	if (ret) {
		pr_debug("No more records to update.\n");
		goto out;
	}

	set_guid_rec(&dev->ib_dev, rec);
out:
	kfree(rec);
}


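/*
 * (Re)schedule the alias GUID work for @port to run immediately.  Only
 * relevant on the master; a pending delayed invocation is cancelled first
 * so the work actually runs now.
 */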
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
{
	unsigned long flags, flags1;

	if (!mlx4_is_master(dev->dev))
		return;
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		/* If there is a pending one, it should be cancelled and then
		 * run; otherwise it won't run until the previous one ends,
		 * as the same work struct is used.
		 */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
				    alias_guid_work);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

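/*
 * Tear down the alias GUID service: cancel the per-port work, cancel and
 * reap any outstanding SA queries, destroy the workqueues and unregister
 * the SA client.
 */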
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		det = &sriov->alias_guid.ports_guid[i];
		cancel_delayed_work_sync(&det->alias_guid_work);
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}

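/*
 * Initialize the alias GUID service on the master: register an SA client,
 * reset the per-port records (all entries marked as deleted), optionally
 * hand GUID assignment to the SM, and create one ordered workqueue per port.
 */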
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		dev->sriov.alias_guid.ports_guid[i].state_flags |=
				GUID_STATE_NEED_PORT_INIT;
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			/* mark each val as if it was deleted */
			memset(dev->sriov.alias_guid.ports_guid[i].
				all_rec_per_port[j].all_recs, 0xFF,
				sizeof(dev->sriov.alias_guid.ports_guid[i].
				all_rec_per_port[j].all_recs));
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/* prepare the records, set them to be allocated by the SM */
		if (mlx4_ib_sm_guid_assign)
			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port  = i;

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		dev->sriov.alias_guid.ports_guid[i].wq =
			alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
			  alias_guid_work);
	}
	return 0;

err_thread:
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}
906