// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  The Serio abstraction module
 *
 *  Copyright (c) 1999-2004 Vojtech Pavlik
 *  Copyright (c) 2004 Dmitry Torokhov
 *  Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

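/*
 * Helpers invoking a driver's connect()/reconnect()/disconnect()
 * callbacks under the port's drv_mutex, so the callbacks for a given
 * port are serialized against each other.
 */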
static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
	int retval;

	mutex_lock(&serio->drv_mutex);
	retval = drv->connect(serio, drv);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
	int retval = -1;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->reconnect)
		retval = serio->drv->reconnect(serio);
	mutex_unlock(&serio->drv_mutex);

	return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv)
		serio->drv->disconnect(serio);
	mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
	while (ids->type || ids->proto) {
		if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
		    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
		    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
		    (ids->id == SERIO_ANY || ids->id == serio->id.id))
			return 1;
		ids++;
	}
	return 0;
}

/*
 * Basic serio -> driver core mappings
 */

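/*
 * serio_bind_driver() manually binds a port to the given driver: the
 * driver's connect() callback runs first and, if it succeeds, the
 * device is bound to the driver in the driver core.
 */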
static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
	int error;

	if (serio_match_port(drv->id_table, serio)) {

		serio->dev.driver = &drv->driver;
		if (serio_connect_driver(serio, drv)) {
			serio->dev.driver = NULL;
			return -ENODEV;
		}

		error = device_bind_driver(&serio->dev);
		if (error) {
			dev_warn(&serio->dev,
				 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
				 serio->phys, serio->name,
				 drv->description, error);
			serio_disconnect_driver(serio);
			serio->dev.driver = NULL;
			return error;
		}
	}
	return 0;
}

static void serio_find_driver(struct serio *serio)
{
	int error;

	error = device_attach(&serio->dev);
	if (error < 0 && error != -EPROBE_DEFER)
		dev_warn(&serio->dev,
			 "device_attach() failed for %s (%s), error: %d\n",
			 serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */
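
/*
 * Rescan, reconnect and registration requests may originate from atomic
 * context (for example from serio_interrupt()), so they are queued on
 * serio_event_list under serio_event_lock and executed later by
 * serio_event_work on the system_long_wq workqueue.
 */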

enum serio_event_type {
	SERIO_RESCAN_PORT,
	SERIO_RECONNECT_PORT,
	SERIO_RECONNECT_SUBTREE,
	SERIO_REGISTER_PORT,
	SERIO_ATTACH_DRIVER,
};

struct serio_event {
	enum serio_event_type type;
	void *object;
	struct module *owner;
	struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
	struct serio_event *event = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	if (!list_empty(&serio_event_list)) {
		event = list_first_entry(&serio_event_list,
					 struct serio_event, node);
		list_del_init(&event->node);
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return event;
}

static void serio_free_event(struct serio_event *event)
{
	module_put(event->owner);
	kfree(event);
}

static void serio_remove_duplicate_events(void *object,
					  enum serio_event_type type)
{
	struct serio_event *e, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(e, next, &serio_event_list, node) {
		if (object == e->object) {
			/*
			 * If this event is of a different type we should not
			 * look further - we only suppress duplicate events
			 * that were sent back-to-back.
			 */
			if (type != e->type)
				break;

			list_del_init(&e->node);
			serio_free_event(e);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

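/*
 * Drain the event queue; runs from the workqueue with serio_mutex held
 * so event handling cannot race with port or driver (un)registration.
 */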
static void serio_handle_event(struct work_struct *work)
{
	struct serio_event *event;

	mutex_lock(&serio_mutex);

	while ((event = serio_get_event())) {

		switch (event->type) {

		case SERIO_REGISTER_PORT:
			serio_add_port(event->object);
			break;

		case SERIO_RECONNECT_PORT:
			serio_reconnect_port(event->object);
			break;

		case SERIO_RESCAN_PORT:
			serio_disconnect_port(event->object);
			serio_find_driver(event->object);
			break;

		case SERIO_RECONNECT_SUBTREE:
			serio_reconnect_subtree(event->object);
			break;

		case SERIO_ATTACH_DRIVER:
			serio_attach_driver(event->object);
			break;
		}

		serio_remove_duplicate_events(event->object, event->type);
		serio_free_event(event);
	}

	mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
			     enum serio_event_type event_type)
{
	unsigned long flags;
	struct serio_event *event;
	int retval = 0;

	spin_lock_irqsave(&serio_event_lock, flags);

	/*
	 * Scan the event list for other events for the same serio port,
	 * starting with the most recent one. If an event of the same type
	 * is already queued we do not need to add a new one. If the most
	 * recent event is of a different type we need to add this event
	 * and should not look further, because we must preserve the
	 * sequence of distinct events.
	 */
	list_for_each_entry_reverse(event, &serio_event_list, node) {
		if (event->object == object) {
			if (event->type == event_type)
				goto out;
			break;
		}
	}

	event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
	if (!event) {
		pr_err("Not enough memory to queue event %d\n", event_type);
		retval = -ENOMEM;
		goto out;
	}

	if (!try_module_get(owner)) {
		pr_warn("Can't get module reference, dropping event %d\n",
			event_type);
		kfree(event);
		retval = -EINVAL;
		goto out;
	}

	event->type = event_type;
	event->object = object;
	event->owner = owner;

	list_add_tail(&event->node, &serio_event_list);
	queue_work(system_long_wq, &serio_event_work);

out:
	spin_unlock_irqrestore(&serio_event_lock, flags);
	return retval;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
	struct serio_event *event, *next;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry_safe(event, next, &serio_event_list, node) {
		if (event->object == object) {
			list_del_init(&event->node);
			serio_free_event(event);
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate a child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by the driver's connect() handler, so there can't
 * be a grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
	struct serio_event *event;
	struct serio *serio, *child = NULL;
	unsigned long flags;

	spin_lock_irqsave(&serio_event_lock, flags);

	list_for_each_entry(event, &serio_event_list, node) {
		if (event->type == SERIO_REGISTER_PORT) {
			serio = event->object;
			if (serio->parent == parent) {
				child = serio;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&serio_event_lock, flags);
	return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
			serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%02x\n", serio->id.extra);
}

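/*
 * Writing to the "drvctl" attribute accepts "none" (unbind the current
 * driver), "reconnect" (reconnect the port and its children), "rescan"
 * (unbind and let the driver core find a driver again), or the name of
 * a registered serio driver to bind the port to manually.
 */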
static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct device_driver *drv;
	int error;

	error = mutex_lock_interruptible(&serio_mutex);
	if (error)
		return error;

	if (!strncmp(buf, "none", count)) {
		serio_disconnect_port(serio);
	} else if (!strncmp(buf, "reconnect", count)) {
		serio_reconnect_subtree(serio);
	} else if (!strncmp(buf, "rescan", count)) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
		serio_disconnect_port(serio);
		error = serio_bind_driver(serio, to_serio_driver(drv));
		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
	} else {
		error = -EINVAL;
	}

	mutex_unlock(&serio_mutex);

	return error ? error : count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);
	return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct serio *serio = to_serio_port(dev);

	return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_proto.attr,
	&dev_attr_id.attr,
	&dev_attr_extra.attr,
	NULL
};

static const struct attribute_group serio_id_attr_group = {
	.name	= "id",
	.attrs	= serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_description.attr,
	&dev_attr_drvctl.attr,
	&dev_attr_bind_mode.attr,
	&dev_attr_firmware_id.attr,
	NULL
};

static const struct attribute_group serio_device_attr_group = {
	.attrs	= serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
	&serio_id_attr_group,
	&serio_device_attr_group,
	NULL
};

static void serio_release_port(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	kfree(serio);
	module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
	static atomic_t serio_no = ATOMIC_INIT(-1);

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&serio->node);
	INIT_LIST_HEAD(&serio->child_node);
	INIT_LIST_HEAD(&serio->children);
	spin_lock_init(&serio->lock);
	mutex_init(&serio->drv_mutex);
	device_initialize(&serio->dev);
	dev_set_name(&serio->dev, "serio%lu",
		     (unsigned long)atomic_inc_return(&serio_no));
	serio->dev.bus = &serio_bus;
	serio->dev.release = serio_release_port;
	serio->dev.groups = serio_device_attr_groups;
	if (serio->parent) {
		serio->dev.parent = &serio->parent->dev;
		serio->depth = serio->parent->depth + 1;
	} else
		serio->depth = 0;
	lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find an appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
	struct serio *parent = serio->parent;
	int error;

	if (parent) {
		serio_pause_rx(parent);
		list_add_tail(&serio->child_node, &parent->children);
		serio_continue_rx(parent);
	}

	list_add_tail(&serio->node, &serio_list);

	if (serio->start)
		serio->start(serio);

	error = device_add(&serio->dev);
	if (error)
		dev_err(&serio->dev,
			"device_add() failed for %s (%s), error: %d\n",
			serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes the unregistration process and removes
 * the port from the system.
 */
static void serio_destroy_port(struct serio *serio)
{
	struct serio *child;

	while ((child = serio_get_pending_child(serio)) != NULL) {
		serio_remove_pending_events(child);
		put_device(&child->dev);
	}

	if (serio->stop)
		serio->stop(serio);

	if (serio->parent) {
		serio_pause_rx(serio->parent);
		list_del_init(&serio->child_node);
		serio_continue_rx(serio->parent);
		serio->parent = NULL;
	}

	if (device_is_registered(&serio->dev))
		device_del(&serio->dev);

	list_del_init(&serio->node);
	serio_remove_pending_events(serio);
	put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
	int error = serio_reconnect_driver(serio);

	if (error) {
		serio_disconnect_port(serio);
		serio_find_driver(serio);
	}

	return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
	struct serio *s = root;
	int error;

	do {
		error = serio_reconnect_port(s);
		if (!error) {
			/*
			 * Reconnect was successful, move on to do the
			 * first child.
			 */
			if (!list_empty(&s->children)) {
				s = list_first_entry(&s->children,
						     struct serio, child_node);
				continue;
			}
		}

		/*
		 * Either it was a leaf node or reconnect failed and it
		 * became a leaf node. Continue reconnecting starting with
		 * the next sibling of the parent node.
		 */
		while (s != root) {
			struct serio *parent = s->parent;

			if (!list_is_last(&s->child_node, &parent->children)) {
				s = list_entry(s->child_node.next,
					       struct serio, child_node);
				break;
			}

			s = parent;
		}
	} while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
	struct serio *s = serio;

	/*
	 * Child ports should be disconnected and destroyed
	 * first; we traverse the tree in depth-first order.
	 */
	while (!list_empty(&serio->children)) {

		/* Locate a leaf */
		while (!list_empty(&s->children))
			s = list_first_entry(&s->children,
					     struct serio, child_node);

		/*
		 * Prune this leaf node unless it is the one we
		 * started with.
		 */
		if (s != serio) {
			struct serio *parent = s->parent;

			device_release_driver(&s->dev);
			serio_destroy_port(s);

			s = parent;
		}
	}

	/*
	 * OK, no children left, now disconnect this port.
	 */
	device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
	serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits a register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
	serio_init_port(serio);
	serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);
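
/*
 * A minimal usage sketch (not part of this file; the example_* names are
 * placeholders): a low-level port driver typically allocates the port,
 * fills in its id, name and callbacks, and registers it through the
 * serio_register_port() wrapper from <linux/serio.h>, which passes
 * THIS_MODULE to __serio_register_port():
 *
 *	struct serio *serio = kzalloc(sizeof(*serio), GFP_KERNEL);
 *
 *	if (serio) {
 *		serio->id.type = SERIO_8042;
 *		strscpy(serio->name, "Example port", sizeof(serio->name));
 *		serio->write = example_write;
 *		serio_register_port(serio);
 *	}
 */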

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
	mutex_lock(&serio_mutex);
	serio_disconnect_port(serio);
	serio_destroy_port(serio);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
	struct serio *s, *next;

	mutex_lock(&serio_mutex);
	list_for_each_entry_safe(s, next, &serio->children, child_node) {
		serio_disconnect_port(s);
		serio_destroy_port(s);
	}
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *driver = to_serio_driver(drv);
	return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
	struct serio_driver *serio_drv = to_serio_driver(drv);
	int retval;

	retval = count;
	if (!strncmp(buf, "manual", count)) {
		serio_drv->manual_bind = true;
	} else if (!strncmp(buf, "auto", count)) {
		serio_drv->manual_bind = false;
	} else {
		retval = -EINVAL;
	}

	return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
	&driver_attr_description.attr,
	&driver_attr_bind_mode.attr,
	NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *drv = to_serio_driver(dev->driver);

	return serio_connect_driver(serio, drv);
}

static int serio_driver_remove(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_disconnect_driver(serio);
	return 0;
}

static void serio_cleanup(struct serio *serio)
{
	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->cleanup)
		serio->drv->cleanup(serio);
	mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
	int error;

	error = driver_attach(&drv->driver);
	if (error)
		pr_warn("driver_attach() failed for %s with error %d\n",
			drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
	bool manual_bind = drv->manual_bind;
	int error;

	drv->driver.bus = &serio_bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	/*
	 * Temporarily disable automatic binding because probing
	 * takes a long time and we are better off doing it in kseriod.
	 */
	drv->manual_bind = true;

	error = driver_register(&drv->driver);
	if (error) {
		pr_err("driver_register() failed for %s, error: %d\n",
			drv->driver.name, error);
		return error;
	}

	/*
	 * Restore the original bind mode and let kseriod bind the
	 * driver to free ports.
	 */
	if (!manual_bind) {
		drv->manual_bind = false;
		error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
		if (error) {
			driver_unregister(&drv->driver);
			return error;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__serio_register_driver);
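
/*
 * A minimal usage sketch (not part of this file; the example_* names are
 * placeholders): a serio driver declares an id table plus interrupt(),
 * connect() and disconnect() callbacks and registers itself with the
 * module_serio_driver() helper from <linux/serio.h>, which wires
 * serio_register_driver()/serio_unregister_driver() into module
 * init/exit:
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio driver",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 */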

void serio_unregister_driver(struct serio_driver *drv)
{
	struct serio *serio;

	mutex_lock(&serio_mutex);

	drv->manual_bind = true;	/* so serio_find_driver ignores it */
	serio_remove_pending_events(drv);

start_over:
	list_for_each_entry(serio, &serio_list, node) {
		if (serio->drv == drv) {
			serio_disconnect_port(serio);
			serio_find_driver(serio);
			/* we could've deleted some ports, restart */
			goto start_over;
		}
	}

	driver_unregister(&drv->driver);
	mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

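/*
 * Update the driver pointer under the port's RX lock so that
 * serio_interrupt() never sees a driver that is being torn down.
 */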
static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
	serio_pause_rx(serio);
	serio->drv = drv;
	serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
	struct serio *serio = to_serio_port(dev);
	struct serio_driver *serio_drv = to_serio_driver(drv);

	if (serio->manual_bind || serio_drv->manual_bind)
		return 0;

	return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct serio *serio;

	if (!dev)
		return -ENODEV;

	serio = to_serio_port(dev);

	SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
	SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
	SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
	SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

	SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
				serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

	if (serio->firmware_id[0])
		SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
				     serio->firmware_id);

	return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
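/*
 * On suspend the port is merely cleaned up; on resume we first try the
 * driver's fast_reconnect() callback, if it has one, and otherwise fall
 * back to queuing a full reconnect for kseriod.
 */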
static int serio_suspend(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);

	serio_cleanup(serio);

	return 0;
}

static int serio_resume(struct device *dev)
{
	struct serio *serio = to_serio_port(dev);
	int error = -ENOENT;

	mutex_lock(&serio->drv_mutex);
	if (serio->drv && serio->drv->fast_reconnect) {
		error = serio->drv->fast_reconnect(serio);
		if (error && error != -ENOENT)
			dev_warn(dev, "fast reconnect failed with error %d\n",
				 error);
	}
	mutex_unlock(&serio->drv_mutex);

	if (error) {
		/*
		 * Driver reconnect can take a while, so better let
		 * kseriod deal with it.
		 */
		serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
	}

	return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
	.suspend	= serio_suspend,
	.resume		= serio_resume,
	.poweroff	= serio_suspend,
	.restore	= serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
	serio_set_drv(serio, drv);

	if (serio->open && serio->open(serio)) {
		serio_set_drv(serio, NULL);
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
	if (serio->close)
		serio->close(serio);

	serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);
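
/*
 * Usage note (a sketch, not part of this file): a driver's connect()
 * handler typically calls serio_open(serio, drv) to start receiving
 * bytes, which serio_interrupt() below then delivers through
 * drv->interrupt(); the disconnect() handler calls serio_close() before
 * freeing any per-port state. Both handlers run under serio_mutex, as
 * noted above.
 */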

irqreturn_t serio_interrupt(struct serio *serio,
		unsigned char data, unsigned int dfl)
{
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&serio->lock, flags);

	if (likely(serio->drv)) {
		ret = serio->drv->interrupt(serio, data, dfl);
	} else if (!dfl && device_is_registered(&serio->dev)) {
		serio_rescan(serio);
		ret = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&serio->lock, flags);

	return ret;
}
EXPORT_SYMBOL(serio_interrupt);

struct bus_type serio_bus = {
	.name		= "serio",
	.drv_groups	= serio_driver_groups,
	.match		= serio_bus_match,
	.uevent		= serio_uevent,
	.probe		= serio_driver_probe,
	.remove		= serio_driver_remove,
	.shutdown	= serio_shutdown,
#ifdef CONFIG_PM
	.pm		= &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
	int error;

	error = bus_register(&serio_bus);
	if (error) {
		pr_err("Failed to register serio bus, error: %d\n", error);
		return error;
	}

	return 0;
}

static void __exit serio_exit(void)
{
	bus_unregister(&serio_bus);

	/*
	 * There should not be any outstanding events but work may
	 * still be scheduled so simply cancel it.
	 */
	cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);
1054