xref: /OK3568_Linux_fs/kernel/drivers/greybus/connection.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Greybus connections
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2014 Google Inc.
6*4882a593Smuzhiyun  * Copyright 2014 Linaro Ltd.
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/workqueue.h>
10*4882a593Smuzhiyun #include <linux/greybus.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "greybus_trace.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun static void gb_connection_kref_release(struct kref *kref);
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun static DEFINE_SPINLOCK(gb_connections_lock);
19*4882a593Smuzhiyun static DEFINE_MUTEX(gb_connection_mutex);
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /* Caller holds gb_connection_mutex. */
gb_connection_cport_in_use(struct gb_interface * intf,u16 cport_id)22*4882a593Smuzhiyun static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun 	struct gb_host_device *hd = intf->hd;
25*4882a593Smuzhiyun 	struct gb_connection *connection;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	list_for_each_entry(connection, &hd->connections, hd_links) {
28*4882a593Smuzhiyun 		if (connection->intf == intf &&
29*4882a593Smuzhiyun 		    connection->intf_cport_id == cport_id)
30*4882a593Smuzhiyun 			return true;
31*4882a593Smuzhiyun 	}
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	return false;
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun 
/* Take an extra reference on a connection and emit a trace event. */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
42*4882a593Smuzhiyun 
/*
 * Drop a reference on a connection; the final put frees it via
 * gb_connection_kref_release().  The trace event is emitted before the
 * put so it never references freed memory.
 */
static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /*
51*4882a593Smuzhiyun  * Returns a reference-counted pointer to the connection if found.
52*4882a593Smuzhiyun  */
53*4882a593Smuzhiyun static struct gb_connection *
gb_connection_hd_find(struct gb_host_device * hd,u16 cport_id)54*4882a593Smuzhiyun gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	struct gb_connection *connection;
57*4882a593Smuzhiyun 	unsigned long flags;
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	spin_lock_irqsave(&gb_connections_lock, flags);
60*4882a593Smuzhiyun 	list_for_each_entry(connection, &hd->connections, hd_links)
61*4882a593Smuzhiyun 		if (connection->hd_cport_id == cport_id) {
62*4882a593Smuzhiyun 			gb_connection_get(connection);
63*4882a593Smuzhiyun 			goto found;
64*4882a593Smuzhiyun 		}
65*4882a593Smuzhiyun 	connection = NULL;
66*4882a593Smuzhiyun found:
67*4882a593Smuzhiyun 	spin_unlock_irqrestore(&gb_connections_lock, flags);
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	return connection;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun /*
73*4882a593Smuzhiyun  * Callback from the host driver to let us know that data has been
74*4882a593Smuzhiyun  * received on the bundle.
75*4882a593Smuzhiyun  */
greybus_data_rcvd(struct gb_host_device * hd,u16 cport_id,u8 * data,size_t length)76*4882a593Smuzhiyun void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
77*4882a593Smuzhiyun 		       u8 *data, size_t length)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	struct gb_connection *connection;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	trace_gb_hd_in(hd);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	connection = gb_connection_hd_find(hd, cport_id);
84*4882a593Smuzhiyun 	if (!connection) {
85*4882a593Smuzhiyun 		dev_err(&hd->dev,
86*4882a593Smuzhiyun 			"nonexistent connection (%zu bytes dropped)\n", length);
87*4882a593Smuzhiyun 		return;
88*4882a593Smuzhiyun 	}
89*4882a593Smuzhiyun 	gb_connection_recv(connection, data, length);
90*4882a593Smuzhiyun 	gb_connection_put(connection);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(greybus_data_rcvd);
93*4882a593Smuzhiyun 
/* Final kref release callback: trace the release and free the connection. */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
104*4882a593Smuzhiyun 
gb_connection_init_name(struct gb_connection * connection)105*4882a593Smuzhiyun static void gb_connection_init_name(struct gb_connection *connection)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	u16 hd_cport_id = connection->hd_cport_id;
108*4882a593Smuzhiyun 	u16 cport_id = 0;
109*4882a593Smuzhiyun 	u8 intf_id = 0;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	if (connection->intf) {
112*4882a593Smuzhiyun 		intf_id = connection->intf->interface_id;
113*4882a593Smuzhiyun 		cport_id = connection->intf_cport_id;
114*4882a593Smuzhiyun 	}
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	snprintf(connection->name, sizeof(connection->name),
117*4882a593Smuzhiyun 		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* A remote interface cport may back at most one connection. */
	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Returns the allocated id (notably when hd_cport_id was -1). */
	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	/* Quirky interfaces lack per-cport features such as flow control. */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Single-threaded (max_active = 1) workqueue for this connection. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the host-device and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
226*4882a593Smuzhiyun 
/* Create a static (AP-internal) connection on the given host cport. */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
234*4882a593Smuzhiyun 
/* Create the high-priority control connection for an interface. */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
242*4882a593Smuzhiyun 
/* Create an ordinary bundle connection with no special flags. */
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
253*4882a593Smuzhiyun 
/*
 * Create a bundle connection with caller-supplied flags.  Core-only
 * flag bits are rejected (warned once) and masked off before use.
 */
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
268*4882a593Smuzhiyun 
/*
 * Create an offloaded bundle connection: traffic is handled by the
 * host driver, so no request handler is installed.
 */
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
278*4882a593Smuzhiyun 
gb_connection_hd_cport_enable(struct gb_connection * connection)279*4882a593Smuzhiyun static int gb_connection_hd_cport_enable(struct gb_connection *connection)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
282*4882a593Smuzhiyun 	int ret;
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	if (!hd->driver->cport_enable)
285*4882a593Smuzhiyun 		return 0;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
288*4882a593Smuzhiyun 				       connection->flags);
289*4882a593Smuzhiyun 	if (ret) {
290*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
291*4882a593Smuzhiyun 			connection->name, ret);
292*4882a593Smuzhiyun 		return ret;
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	return 0;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun 
gb_connection_hd_cport_disable(struct gb_connection * connection)298*4882a593Smuzhiyun static void gb_connection_hd_cport_disable(struct gb_connection *connection)
299*4882a593Smuzhiyun {
300*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
301*4882a593Smuzhiyun 	int ret;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	if (!hd->driver->cport_disable)
304*4882a593Smuzhiyun 		return;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
307*4882a593Smuzhiyun 	if (ret) {
308*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
309*4882a593Smuzhiyun 			connection->name, ret);
310*4882a593Smuzhiyun 	}
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun 
gb_connection_hd_cport_connected(struct gb_connection * connection)313*4882a593Smuzhiyun static int gb_connection_hd_cport_connected(struct gb_connection *connection)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
316*4882a593Smuzhiyun 	int ret;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	if (!hd->driver->cport_connected)
319*4882a593Smuzhiyun 		return 0;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
322*4882a593Smuzhiyun 	if (ret) {
323*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
324*4882a593Smuzhiyun 			connection->name, ret);
325*4882a593Smuzhiyun 		return ret;
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	return 0;
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun 
gb_connection_hd_cport_flush(struct gb_connection * connection)331*4882a593Smuzhiyun static int gb_connection_hd_cport_flush(struct gb_connection *connection)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
334*4882a593Smuzhiyun 	int ret;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (!hd->driver->cport_flush)
337*4882a593Smuzhiyun 		return 0;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
340*4882a593Smuzhiyun 	if (ret) {
341*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
342*4882a593Smuzhiyun 			connection->name, ret);
343*4882a593Smuzhiyun 		return ret;
344*4882a593Smuzhiyun 	}
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	return 0;
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun 
gb_connection_hd_cport_quiesce(struct gb_connection * connection)349*4882a593Smuzhiyun static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
352*4882a593Smuzhiyun 	size_t peer_space;
353*4882a593Smuzhiyun 	int ret;
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	if (!hd->driver->cport_quiesce)
356*4882a593Smuzhiyun 		return 0;
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	peer_space = sizeof(struct gb_operation_msg_hdr) +
359*4882a593Smuzhiyun 			sizeof(struct gb_cport_shutdown_request);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	if (connection->mode_switch)
362*4882a593Smuzhiyun 		peer_space += sizeof(struct gb_operation_msg_hdr);
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
365*4882a593Smuzhiyun 					peer_space,
366*4882a593Smuzhiyun 					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
367*4882a593Smuzhiyun 	if (ret) {
368*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
369*4882a593Smuzhiyun 			connection->name, ret);
370*4882a593Smuzhiyun 		return ret;
371*4882a593Smuzhiyun 	}
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	return 0;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
gb_connection_hd_cport_clear(struct gb_connection * connection)376*4882a593Smuzhiyun static int gb_connection_hd_cport_clear(struct gb_connection *connection)
377*4882a593Smuzhiyun {
378*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
379*4882a593Smuzhiyun 	int ret;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	if (!hd->driver->cport_clear)
382*4882a593Smuzhiyun 		return 0;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
385*4882a593Smuzhiyun 	if (ret) {
386*4882a593Smuzhiyun 		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
387*4882a593Smuzhiyun 			connection->name, ret);
388*4882a593Smuzhiyun 		return ret;
389*4882a593Smuzhiyun 	}
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	return 0;
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun /*
395*4882a593Smuzhiyun  * Request the SVC to create a connection from AP's cport to interface's
396*4882a593Smuzhiyun  * cport.
397*4882a593Smuzhiyun  */
398*4882a593Smuzhiyun static int
gb_connection_svc_connection_create(struct gb_connection * connection)399*4882a593Smuzhiyun gb_connection_svc_connection_create(struct gb_connection *connection)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
402*4882a593Smuzhiyun 	struct gb_interface *intf;
403*4882a593Smuzhiyun 	u8 cport_flags;
404*4882a593Smuzhiyun 	int ret;
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	if (gb_connection_is_static(connection))
407*4882a593Smuzhiyun 		return 0;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	intf = connection->intf;
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	/*
412*4882a593Smuzhiyun 	 * Enable either E2EFC or CSD, unless no flow control is requested.
413*4882a593Smuzhiyun 	 */
414*4882a593Smuzhiyun 	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
415*4882a593Smuzhiyun 	if (gb_connection_flow_control_disabled(connection)) {
416*4882a593Smuzhiyun 		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
417*4882a593Smuzhiyun 	} else if (gb_connection_e2efc_enabled(connection)) {
418*4882a593Smuzhiyun 		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
419*4882a593Smuzhiyun 				GB_SVC_CPORT_FLAG_E2EFC;
420*4882a593Smuzhiyun 	}
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	ret = gb_svc_connection_create(hd->svc,
423*4882a593Smuzhiyun 				       hd->svc->ap_intf_id,
424*4882a593Smuzhiyun 				       connection->hd_cport_id,
425*4882a593Smuzhiyun 				       intf->interface_id,
426*4882a593Smuzhiyun 				       connection->intf_cport_id,
427*4882a593Smuzhiyun 				       cport_flags);
428*4882a593Smuzhiyun 	if (ret) {
429*4882a593Smuzhiyun 		dev_err(&connection->hd->dev,
430*4882a593Smuzhiyun 			"%s: failed to create svc connection: %d\n",
431*4882a593Smuzhiyun 			connection->name, ret);
432*4882a593Smuzhiyun 		return ret;
433*4882a593Smuzhiyun 	}
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun 	return 0;
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun 
/*
 * Ask the SVC to tear down the route between the AP cport and the
 * remote interface cport.  Static connections have no SVC route.
 */
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun /* Inform Interface about active CPorts */
gb_connection_control_connected(struct gb_connection * connection)452*4882a593Smuzhiyun static int gb_connection_control_connected(struct gb_connection *connection)
453*4882a593Smuzhiyun {
454*4882a593Smuzhiyun 	struct gb_control *control;
455*4882a593Smuzhiyun 	u16 cport_id = connection->intf_cport_id;
456*4882a593Smuzhiyun 	int ret;
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	if (gb_connection_is_static(connection))
459*4882a593Smuzhiyun 		return 0;
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun 	if (gb_connection_is_control(connection))
462*4882a593Smuzhiyun 		return 0;
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	control = connection->intf->control;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	ret = gb_control_connected_operation(control, cport_id);
467*4882a593Smuzhiyun 	if (ret) {
468*4882a593Smuzhiyun 		dev_err(&connection->bundle->dev,
469*4882a593Smuzhiyun 			"failed to connect cport: %d\n", ret);
470*4882a593Smuzhiyun 		return ret;
471*4882a593Smuzhiyun 	}
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	return 0;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun static void
gb_connection_control_disconnecting(struct gb_connection * connection)477*4882a593Smuzhiyun gb_connection_control_disconnecting(struct gb_connection *connection)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun 	struct gb_control *control;
480*4882a593Smuzhiyun 	u16 cport_id = connection->intf_cport_id;
481*4882a593Smuzhiyun 	int ret;
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	if (gb_connection_is_static(connection))
484*4882a593Smuzhiyun 		return;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	control = connection->intf->control;
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun 	ret = gb_control_disconnecting_operation(control, cport_id);
489*4882a593Smuzhiyun 	if (ret) {
490*4882a593Smuzhiyun 		dev_err(&connection->hd->dev,
491*4882a593Smuzhiyun 			"%s: failed to send disconnecting: %d\n",
492*4882a593Smuzhiyun 			connection->name, ret);
493*4882a593Smuzhiyun 	}
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun 
/*
 * Tell the remote interface the cport has been disconnected.
 *
 * For the control connection itself no disconnected message is sent;
 * instead an optional mode-switch operation is issued when one was
 * requested.
 */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		/* Non-fatal: the connection is going away regardless. */
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
529*4882a593Smuzhiyun 
/*
 * Send a cport-shutdown request for the given phase over the
 * connection itself and wait synchronously for the response.
 */
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	/* Core operation: permitted even while the connection winds down. */
	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	gb_operation_put(operation);

	return ret;
}
553*4882a593Smuzhiyun 
/*
 * Shut down the remote end of the cport for the given phase, either
 * through the host driver for offloaded connections or by sending a
 * regular shutdown operation.  Static connections have no remote end
 * to shut down.
 */
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		/* Offloaded cports shut down via the host driver, if at all. */
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
582*4882a593Smuzhiyun 
/* Phase 1 of the two-phase cport-shutdown handshake. */
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}
588*4882a593Smuzhiyun 
/* Phase 2 of the two-phase cport-shutdown handshake. */
static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
594*4882a593Smuzhiyun 
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		/* Hold a ref while the lock is dropped for cancellation. */
		gb_operation_get(operation);
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Reacquire and re-check: the list may have changed. */
		spin_lock_irq(&connection->lock);
	}
}
623*4882a593Smuzhiyun 
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		/* Find the next incoming operation, taking a reference. */
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations remain: done. */
		if (!incoming)
			break;

		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		/* Reacquire and rescan: the list may have changed. */
		spin_lock_irq(&connection->lock);
	}
}
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun /*
662*4882a593Smuzhiyun  * _gb_connection_enable() - enable a connection
663*4882a593Smuzhiyun  * @connection:		connection to enable
664*4882a593Smuzhiyun  * @rx:			whether to enable incoming requests
665*4882a593Smuzhiyun  *
666*4882a593Smuzhiyun  * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
667*4882a593Smuzhiyun  * ENABLED_TX->ENABLED state transitions.
668*4882a593Smuzhiyun  *
669*4882a593Smuzhiyun  * Locking: Caller holds connection->mutex.
670*4882a593Smuzhiyun  */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		/*
		 * Without a request handler, or when rx was not asked for,
		 * the connection stays tx-only and there is nothing to do.
		 */
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	/* DISABLED -> ENABLED(_TX): bring up host cport first. */
	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	/* Then establish the SVC-side connection. */
	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	/* Notify the host driver that the cport is now connected. */
	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	/*
	 * Mark the connection enabled before telling the remote end, so
	 * that responses (and, for rx, incoming requests) are accepted.
	 */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	/*
	 * Unwind mirrors gb_connection_disable(): cancel outstanding
	 * operations, then run the ordered cport shutdown sequence.
	 */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
736*4882a593Smuzhiyun 
gb_connection_enable(struct gb_connection * connection)737*4882a593Smuzhiyun int gb_connection_enable(struct gb_connection *connection)
738*4882a593Smuzhiyun {
739*4882a593Smuzhiyun 	int ret = 0;
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 	mutex_lock(&connection->mutex);
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	if (connection->state == GB_CONNECTION_STATE_ENABLED)
744*4882a593Smuzhiyun 		goto out_unlock;
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	ret = _gb_connection_enable(connection, true);
747*4882a593Smuzhiyun 	if (!ret)
748*4882a593Smuzhiyun 		trace_gb_connection_enable(connection);
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun out_unlock:
751*4882a593Smuzhiyun 	mutex_unlock(&connection->mutex);
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	return ret;
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_enable);
756*4882a593Smuzhiyun 
gb_connection_enable_tx(struct gb_connection * connection)757*4882a593Smuzhiyun int gb_connection_enable_tx(struct gb_connection *connection)
758*4882a593Smuzhiyun {
759*4882a593Smuzhiyun 	int ret = 0;
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	mutex_lock(&connection->mutex);
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun 	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
764*4882a593Smuzhiyun 		ret = -EINVAL;
765*4882a593Smuzhiyun 		goto out_unlock;
766*4882a593Smuzhiyun 	}
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
769*4882a593Smuzhiyun 		goto out_unlock;
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 	ret = _gb_connection_enable(connection, false);
772*4882a593Smuzhiyun 	if (!ret)
773*4882a593Smuzhiyun 		trace_gb_connection_enable(connection);
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun out_unlock:
776*4882a593Smuzhiyun 	mutex_unlock(&connection->mutex);
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun 	return ret;
779*4882a593Smuzhiyun }
780*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
781*4882a593Smuzhiyun 
gb_connection_disable_rx(struct gb_connection * connection)782*4882a593Smuzhiyun void gb_connection_disable_rx(struct gb_connection *connection)
783*4882a593Smuzhiyun {
784*4882a593Smuzhiyun 	mutex_lock(&connection->mutex);
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	spin_lock_irq(&connection->lock);
787*4882a593Smuzhiyun 	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
788*4882a593Smuzhiyun 		spin_unlock_irq(&connection->lock);
789*4882a593Smuzhiyun 		goto out_unlock;
790*4882a593Smuzhiyun 	}
791*4882a593Smuzhiyun 	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
792*4882a593Smuzhiyun 	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
793*4882a593Smuzhiyun 	spin_unlock_irq(&connection->lock);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	trace_gb_connection_disable(connection);
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun out_unlock:
798*4882a593Smuzhiyun 	mutex_unlock(&connection->mutex);
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
801*4882a593Smuzhiyun 
/*
 * Flag the connection as mode switching; gb_connection_disable() then
 * defers host-cport and SVC-connection teardown until
 * gb_connection_mode_switch_complete() is called.
 */
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}
806*4882a593Smuzhiyun 
/*
 * Finish a mode switch: perform the SVC-connection and host-cport
 * teardown that gb_connection_disable() deferred, then clear the
 * mode-switch flag.
 */
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
816*4882a593Smuzhiyun 
/*
 * Disable a connection, coordinating the shutdown with the remote end.
 * No-op if the connection is already disabled.
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	/* Cancel all outstanding operations before tearing down. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	/*
	 * Ordered shutdown handshake: announce disconnecting, quiesce the
	 * cport in two phases, then announce disconnected.
	 */
	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun /* Disable a connection without communicating with the remote end. */
gb_connection_disable_forced(struct gb_connection * connection)855*4882a593Smuzhiyun void gb_connection_disable_forced(struct gb_connection *connection)
856*4882a593Smuzhiyun {
857*4882a593Smuzhiyun 	mutex_lock(&connection->mutex);
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun 	if (connection->state == GB_CONNECTION_STATE_DISABLED)
860*4882a593Smuzhiyun 		goto out_unlock;
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 	trace_gb_connection_disable(connection);
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	spin_lock_irq(&connection->lock);
865*4882a593Smuzhiyun 	connection->state = GB_CONNECTION_STATE_DISABLED;
866*4882a593Smuzhiyun 	gb_connection_cancel_operations(connection, -ESHUTDOWN);
867*4882a593Smuzhiyun 	spin_unlock_irq(&connection->lock);
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	gb_connection_hd_cport_flush(connection);
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	gb_connection_svc_connection_destroy(connection);
872*4882a593Smuzhiyun 	gb_connection_hd_cport_clear(connection);
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	gb_connection_hd_cport_disable(connection);
875*4882a593Smuzhiyun out_unlock:
876*4882a593Smuzhiyun 	mutex_unlock(&connection->mutex);
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun /* Caller must have disabled the connection before destroying it. */
gb_connection_destroy(struct gb_connection * connection)881*4882a593Smuzhiyun void gb_connection_destroy(struct gb_connection *connection)
882*4882a593Smuzhiyun {
883*4882a593Smuzhiyun 	if (!connection)
884*4882a593Smuzhiyun 		return;
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
887*4882a593Smuzhiyun 		gb_connection_disable(connection);
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 	mutex_lock(&gb_connection_mutex);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	spin_lock_irq(&gb_connections_lock);
892*4882a593Smuzhiyun 	list_del(&connection->bundle_links);
893*4882a593Smuzhiyun 	list_del(&connection->hd_links);
894*4882a593Smuzhiyun 	spin_unlock_irq(&gb_connections_lock);
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	destroy_workqueue(connection->wq);
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
899*4882a593Smuzhiyun 	connection->hd_cport_id = CPORT_ID_BAD;
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	mutex_unlock(&gb_connection_mutex);
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	gb_connection_put(connection);
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_destroy);
906*4882a593Smuzhiyun 
gb_connection_latency_tag_enable(struct gb_connection * connection)907*4882a593Smuzhiyun void gb_connection_latency_tag_enable(struct gb_connection *connection)
908*4882a593Smuzhiyun {
909*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
910*4882a593Smuzhiyun 	int ret;
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	if (!hd->driver->latency_tag_enable)
913*4882a593Smuzhiyun 		return;
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
916*4882a593Smuzhiyun 	if (ret) {
917*4882a593Smuzhiyun 		dev_err(&connection->hd->dev,
918*4882a593Smuzhiyun 			"%s: failed to enable latency tag: %d\n",
919*4882a593Smuzhiyun 			connection->name, ret);
920*4882a593Smuzhiyun 	}
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
923*4882a593Smuzhiyun 
gb_connection_latency_tag_disable(struct gb_connection * connection)924*4882a593Smuzhiyun void gb_connection_latency_tag_disable(struct gb_connection *connection)
925*4882a593Smuzhiyun {
926*4882a593Smuzhiyun 	struct gb_host_device *hd = connection->hd;
927*4882a593Smuzhiyun 	int ret;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	if (!hd->driver->latency_tag_disable)
930*4882a593Smuzhiyun 		return;
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
933*4882a593Smuzhiyun 	if (ret) {
934*4882a593Smuzhiyun 		dev_err(&connection->hd->dev,
935*4882a593Smuzhiyun 			"%s: failed to disable latency tag: %d\n",
936*4882a593Smuzhiyun 			connection->name, ret);
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun }
939*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
940