xref: /OK3568_Linux_fs/kernel/net/smc/smc_ism.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"

struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};

bool smc_ism_v2_capable;

/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
	return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
					   vlan_id);
}

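/* Write data into the remote DMB identified by pos->token at pos->offset */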
int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
		  void *data, size_t len)
{
	int rc;

	rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
				  pos->offset, data, len);

	return rc < 0 ? rc : 0;
}

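/* Get the system EID of the ISM device */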
void smc_ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
{
	smcd->ops->get_system_eid(smcd, eid);
}

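/* Get the CHID of the ISM device */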
u16 smc_ism_get_chid(struct smcd_dev *smcd)
{
	return smcd->ops->get_chid(smcd);
}

/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Unset a connection using this DMBE. */
void smc_ism_unset_conn(struct smc_connection *conn)
{
	unsigned long flags;

	if (!conn->rmb_desc)
		return;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Register a VLAN identifier with the ISM device. Use a reference count
 * and add a VLAN identifier only when the first DMB using this VLAN is
 * registered.
 */
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *new_vlan, *vlan;
	unsigned long flags;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	/* create new vlan entry, in case we need it */
	new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;
	new_vlan->vlanid = vlanid;
	refcount_set(&new_vlan->refcnt, 1);

	/* if there is an existing entry, increase count and return */
	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			refcount_inc(&vlan->refcnt);
			kfree(new_vlan);
			goto out;
		}
	}

	/* no existing entry found.
	 * add new entry to device; might fail, e.g., if HW limit reached
	 */
	if (smcd->ops->add_vlan_id(smcd, vlanid)) {
		kfree(new_vlan);
		rc = -EIO;
		goto out;
	}
	list_add_tail(&new_vlan->list, &smcd->vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

/* Unregister a VLAN identifier with the ISM device. Use a reference count
 * and remove a VLAN identifier only when the last DMB using this VLAN is
 * unregistered.
 */
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *vlan;
	unsigned long flags;
	bool found = false;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			if (!refcount_dec_and_test(&vlan->refcnt))
				goto out;
			found = true;
			break;
		}
	}
	if (!found) {
		rc = -ENOENT;
		goto out;		/* VLAN id not in table */
	}

	/* Found and the last reference just gone */
	if (smcd->ops->del_vlan_id(smcd, vlanid))
		rc = -EIO;
	list_del(&vlan->list);
	kfree(vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

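/* Unregister a DMB with the ISM device; on success or on ISM_ERROR the
 * addresses in the buffer descriptor are cleared.
 */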
int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc = 0;

	if (!dmb_desc->dma_addr)
		return rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_tok = dmb_desc->token;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.cpu_addr = dmb_desc->cpu_addr;
	dmb.dma_addr = dmb_desc->dma_addr;
	dmb.dmb_len = dmb_desc->len;
	rc = smcd->ops->unregister_dmb(smcd, &dmb);
	if (!rc || rc == ISM_ERROR) {
		dmb_desc->cpu_addr = NULL;
		dmb_desc->dma_addr = 0;
	}

	return rc;
}

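/* Register a DMB with the ISM device and fill the buffer descriptor from
 * the result on success.
 */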
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
			 struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_len = dmb_len;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.vlan_id = lgr->vlan_id;
	dmb.rgid = lgr->peer_gid;
	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
	if (!rc) {
		dmb_desc->sba_idx = dmb.sba_idx;
		dmb_desc->token = dmb.dmb_tok;
		dmb_desc->cpu_addr = dmb.cpu_addr;
		dmb_desc->dma_addr = dmb.dma_addr;
		dmb_desc->len = dmb.dmb_len;
	}
	return rc;
}

struct smc_ism_event_work {
	struct work_struct work;
	struct smcd_dev *smcd;
	struct smcd_event event;
};

#define ISM_EVENT_REQUEST		0x0001
#define ISM_EVENT_RESPONSE		0x0002
#define ISM_EVENT_REQUEST_IR		0x00000001
#define ISM_EVENT_CODE_SHUTDOWN		0x80
#define ISM_EVENT_CODE_TESTLINK		0x83

union smcd_sw_event_info {
	u64	info;
	struct {
		u8		uid[SMC_LGR_ID_SIZE];
		unsigned short	vlan_id;
		u16		code;
	};
};

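/* Handle software defined (SWR) events: peer shutdown of DMBs and
 * test link requests.
 */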
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
	union smcd_sw_event_info ev_info;

	ev_info.info = wrk->event.info;
	switch (wrk->event.code) {
	case ISM_EVENT_CODE_SHUTDOWN:	/* Peer shut down DMBs */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
		break;
	case ISM_EVENT_CODE_TESTLINK:	/* Activity timer */
		if (ev_info.code == ISM_EVENT_REQUEST) {
			ev_info.code = ISM_EVENT_RESPONSE;
			wrk->smcd->ops->signal_event(wrk->smcd,
						     wrk->event.tok,
						     ISM_EVENT_REQUEST_IR,
						     ISM_EVENT_CODE_TESTLINK,
						     ev_info.info);
		}
		break;
	}
}

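/* Signal shutdown of this link group to the peer via a software defined
 * event, unless the peer already announced its shutdown.
 */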
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
	int rc;
	union smcd_sw_event_info ev_info;

	if (lgr->peer_shutdown)
		return 0;

	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
	ev_info.vlan_id = lgr->vlan_id;
	ev_info.code = ISM_EVENT_REQUEST;
	rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
					  ISM_EVENT_REQUEST_IR,
					  ISM_EVENT_CODE_SHUTDOWN,
					  ev_info.info);
	return rc;
}

/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
	struct smc_ism_event_work *wrk =
		container_of(work, struct smc_ism_event_work, work);

	switch (wrk->event.type) {
	case ISM_EVENT_GID:	/* GID event, token is peer GID */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
		break;
	case ISM_EVENT_DMB:
		break;
	case ISM_EVENT_SWR:	/* Software defined event */
		smcd_handle_sw_event(wrk);
		break;
	}
	kfree(wrk);
}

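/* Release function of the smcd device; frees the connection array and the
 * device structure.
 */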
static void smcd_release(struct device *dev)
{
	struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);

	kfree(smcd->conn);
	kfree(smcd);
}

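/* Allocate and initialize an smcd device on behalf of an ISM device driver.
 * A driver typically pairs this with smcd_register_dev() and, on removal,
 * with smcd_unregister_dev() followed by smcd_free_dev().
 */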
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
				const struct smcd_ops *ops, int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
	if (!smcd)
		return NULL;
	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);
	if (!smcd->conn) {
		kfree(smcd);
		return NULL;
	}

	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
						 WQ_MEM_RECLAIM, name);
	if (!smcd->event_wq) {
		kfree(smcd->conn);
		kfree(smcd);
		return NULL;
	}

	smcd->dev.parent = parent;
	smcd->dev.release = smcd_release;
	device_initialize(&smcd->dev);
	dev_set_name(&smcd->dev, name);
	smcd->ops = ops;
	if (smc_pnetid_by_dev_port(parent, 0, smcd->pnetid))
		smc_pnetid_by_table_smcd(smcd);

	spin_lock_init(&smcd->lock);
	spin_lock_init(&smcd->lgr_lock);
	INIT_LIST_HEAD(&smcd->vlan);
	INIT_LIST_HEAD(&smcd->lgr_list);
	init_waitqueue_head(&smcd->lgrs_deleted);
	return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);

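/* Register an smcd device: derive SMC-Dv2 capability from the system EID of
 * the first device, add the device to the smcd device list (devices without
 * pnetid first) and to the device model.
 */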
int smcd_register_dev(struct smcd_dev *smcd)
{
	int rc;

	mutex_lock(&smcd_dev_list.mutex);
	if (list_empty(&smcd_dev_list.list)) {
		u8 *system_eid = NULL;

		smc_ism_get_system_eid(smcd, &system_eid);
		if (system_eid[24] != '0' || system_eid[28] != '0')
			smc_ism_v2_capable = true;
	}
	/* sort list: devices without pnetid before devices with pnetid */
	if (smcd->pnetid[0])
		list_add_tail(&smcd->list, &smcd_dev_list.list);
	else
		list_add(&smcd->list, &smcd_dev_list.list);
	mutex_unlock(&smcd_dev_list.mutex);

	pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
			    dev_name(&smcd->dev), smcd->pnetid,
			    smcd->pnetid_by_user ? " (user defined)" : "");

	rc = device_add(&smcd->dev);
	if (rc) {
		mutex_lock(&smcd_dev_list.mutex);
		list_del(&smcd->list);
		mutex_unlock(&smcd_dev_list.mutex);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);

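/* Unregister an smcd device: remove it from the smcd device list, terminate
 * all its link groups, drain and destroy its event work queue, and remove it
 * from the device model.
 */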
void smcd_unregister_dev(struct smcd_dev *smcd)
{
	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&smcd->dev));
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	smcd->going_away = 1;
	smc_smcd_terminate_all(smcd);
	flush_workqueue(smcd->event_wq);
	destroy_workqueue(smcd->event_wq);

	device_del(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_unregister_dev);

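/* Drop the device reference taken in smcd_alloc_dev(); smcd_release() frees
 * the device once the last reference is gone.
 */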
void smcd_free_dev(struct smcd_dev *smcd)
{
	put_device(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_free_dev);

/* SMCD Device event handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer,
 * - event->type (0 --> DMB, 1 --> GID),
 * - event->code (event code),
 * - event->tok (either DMB token when event type 0, or GID when event type 1)
 * - event->time (time of day)
 * - event->info (debug info).
 *
 * Context:
 * - Function called in IRQ context from ISM device driver event handler.
 */
void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
	struct smc_ism_event_work *wrk;

	if (smcd->going_away)
		return;
	/* copy event to event work queue, and let it be handled there */
	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;
	INIT_WORK(&wrk->work, smc_ism_event_work);
	wrk->smcd = smcd;
	wrk->event = *event;
	queue_work(smcd->event_wq, &wrk->work);
}
EXPORT_SYMBOL_GPL(smcd_handle_event);

/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer and DMB number. Find the connection and
 * schedule the tasklet for this connection.
 *
 * Context:
 * - Function called in IRQ context from ISM device driver IRQ handler.
 */
void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
{
	struct smc_connection *conn = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smcd->lock, flags);
	conn = smcd->conn[dmbno];
	if (conn && !conn->killed)
		tasklet_schedule(&conn->rx_tsklet);
	spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);

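/* Start without SMC-Dv2 capability; it is detected when the first ISM
 * device is registered.
 */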
void __init smc_ism_init(void)
{
	smc_ism_v2_capable = false;
}