1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/wakeup.c - System wakeup events framework
4  *
5  * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
6  */
7 #define pr_fmt(fmt) "PM: " fmt
8 
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/capability.h>
13 #include <linux/export.h>
14 #include <linux/suspend.h>
15 #include <linux/seq_file.h>
16 #include <linux/debugfs.h>
17 #include <linux/pm_wakeirq.h>
18 #include <linux/irq.h>
19 #include <linux/irqdesc.h>
20 #include <linux/wakeup_reason.h>
21 #include <trace/events/power.h>
22 
23 #include "power.h"
24 
25 #ifndef CONFIG_SUSPEND
26 suspend_state_t pm_suspend_target_state;
27 #define pm_suspend_target_state	(PM_SUSPEND_ON)
28 #endif
29 
30 #define list_for_each_entry_rcu_locked(pos, head, member) \
31 	list_for_each_entry_rcu(pos, head, member, \
32 		srcu_read_lock_held(&wakeup_srcu))
33 /*
34  * If set, the suspend/hibernate code will abort transitions to a sleep state
35  * if wakeup events are registered during or immediately before the transition.
36  */
37 bool events_check_enabled __read_mostly;
38 
39 /* First wakeup IRQ seen by the kernel in the last cycle. */
40 static unsigned int wakeup_irq[2] __read_mostly;
41 static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
42 
43 /* If greater than 0 and the system is suspending, terminate the suspend. */
44 static atomic_t pm_abort_suspend __read_mostly;
45 
46 /*
47  * Combined counters of registered wakeup events and wakeup events in progress.
48  * They need to be modified together atomically, so it's better to use one
49  * atomic variable to hold them both.
50  */
51 static atomic_t combined_event_count = ATOMIC_INIT(0);
52 
53 #define IN_PROGRESS_BITS	(sizeof(int) * 4)
54 #define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
55 
56 static void split_counters(unsigned int *cnt, unsigned int *inpr)
57 {
58 	unsigned int comb = atomic_read(&combined_event_count);
59 
60 	*cnt = (comb >> IN_PROGRESS_BITS);
61 	*inpr = comb & MAX_IN_PROGRESS;
62 }
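/*
 * Illustration (not used by the code): with a 32-bit int, IN_PROGRESS_BITS
 * is 16, so the low 16 bits of combined_event_count hold the number of
 * wakeup events in progress and the high 16 bits hold the number of
 * registered (completed) events:
 *
 *	comb = (cnt << IN_PROGRESS_BITS) | inpr;
 *
 * Activating a source does atomic_inc_return(), i.e. inpr + 1, while
 * deactivating adds MAX_IN_PROGRESS, which takes one from the in-progress
 * half and carries one into the count half, i.e. cnt + 1, inpr - 1.  Both
 * counters are therefore updated in a single atomic operation.
 */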
63 
64 /* A preserved old value of the events counter. */
65 static unsigned int saved_count;
66 
67 static DEFINE_RAW_SPINLOCK(events_lock);
68 
69 static void pm_wakeup_timer_fn(struct timer_list *t);
70 
71 static LIST_HEAD(wakeup_sources);
72 
73 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
74 
75 DEFINE_STATIC_SRCU(wakeup_srcu);
76 
77 static struct wakeup_source deleted_ws = {
78 	.name = "deleted",
79 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
80 };
81 
82 static DEFINE_IDA(wakeup_ida);
83 
84 /**
85  * wakeup_source_create - Create a struct wakeup_source object.
86  * @name: Name of the new wakeup source.
87  */
88 struct wakeup_source *wakeup_source_create(const char *name)
89 {
90 	struct wakeup_source *ws;
91 	const char *ws_name;
92 	int id;
93 
94 	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
95 	if (!ws)
96 		goto err_ws;
97 
98 	ws_name = kstrdup_const(name, GFP_KERNEL);
99 	if (!ws_name)
100 		goto err_name;
101 	ws->name = ws_name;
102 
103 	id = ida_alloc(&wakeup_ida, GFP_KERNEL);
104 	if (id < 0)
105 		goto err_id;
106 	ws->id = id;
107 
108 	return ws;
109 
110 err_id:
111 	kfree_const(ws->name);
112 err_name:
113 	kfree(ws);
114 err_ws:
115 	return NULL;
116 }
117 EXPORT_SYMBOL_GPL(wakeup_source_create);
118 
119 /*
120  * Record wakeup_source statistics being deleted into a dummy wakeup_source.
121  */
122 static void wakeup_source_record(struct wakeup_source *ws)
123 {
124 	unsigned long flags;
125 
126 	spin_lock_irqsave(&deleted_ws.lock, flags);
127 
128 	if (ws->event_count) {
129 		deleted_ws.total_time =
130 			ktime_add(deleted_ws.total_time, ws->total_time);
131 		deleted_ws.prevent_sleep_time =
132 			ktime_add(deleted_ws.prevent_sleep_time,
133 				  ws->prevent_sleep_time);
134 		deleted_ws.max_time =
135 			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
136 				deleted_ws.max_time : ws->max_time;
137 		deleted_ws.event_count += ws->event_count;
138 		deleted_ws.active_count += ws->active_count;
139 		deleted_ws.relax_count += ws->relax_count;
140 		deleted_ws.expire_count += ws->expire_count;
141 		deleted_ws.wakeup_count += ws->wakeup_count;
142 	}
143 
144 	spin_unlock_irqrestore(&deleted_ws.lock, flags);
145 }
146 
147 static void wakeup_source_free(struct wakeup_source *ws)
148 {
149 	ida_free(&wakeup_ida, ws->id);
150 	kfree_const(ws->name);
151 	kfree(ws);
152 }
153 
154 /**
155  * wakeup_source_destroy - Destroy a struct wakeup_source object.
156  * @ws: Wakeup source to destroy.
157  *
158  * Use only for wakeup source objects created with wakeup_source_create().
159  */
160 void wakeup_source_destroy(struct wakeup_source *ws)
161 {
162 	if (!ws)
163 		return;
164 
165 	__pm_relax(ws);
166 	wakeup_source_record(ws);
167 	wakeup_source_free(ws);
168 }
169 EXPORT_SYMBOL_GPL(wakeup_source_destroy);
170 
171 /**
172  * wakeup_source_add - Add given object to the list of wakeup sources.
173  * @ws: Wakeup source object to add to the list.
174  */
175 void wakeup_source_add(struct wakeup_source *ws)
176 {
177 	unsigned long flags;
178 
179 	if (WARN_ON(!ws))
180 		return;
181 
182 	spin_lock_init(&ws->lock);
183 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
184 	ws->active = false;
185 
186 	raw_spin_lock_irqsave(&events_lock, flags);
187 	list_add_rcu(&ws->entry, &wakeup_sources);
188 	raw_spin_unlock_irqrestore(&events_lock, flags);
189 }
190 EXPORT_SYMBOL_GPL(wakeup_source_add);
191 
192 /**
193  * wakeup_source_remove - Remove given object from the wakeup sources list.
194  * @ws: Wakeup source object to remove from the list.
195  */
196 void wakeup_source_remove(struct wakeup_source *ws)
197 {
198 	unsigned long flags;
199 
200 	if (WARN_ON(!ws))
201 		return;
202 
203 	raw_spin_lock_irqsave(&events_lock, flags);
204 	list_del_rcu(&ws->entry);
205 	raw_spin_unlock_irqrestore(&events_lock, flags);
206 	synchronize_srcu(&wakeup_srcu);
207 
208 	del_timer_sync(&ws->timer);
209 	/*
210 	 * Clear timer.function to make wakeup_source_not_registered() treat
211 	 * this wakeup source as not registered.
212 	 */
213 	ws->timer.function = NULL;
214 }
215 EXPORT_SYMBOL_GPL(wakeup_source_remove);
216 
217 /**
218  * wakeup_source_register - Create wakeup source and add it to the list.
219  * @dev: Device this wakeup source is associated with (or NULL if virtual).
220  * @name: Name of the wakeup source to register.
221  */
222 struct wakeup_source *wakeup_source_register(struct device *dev,
223 					     const char *name)
224 {
225 	struct wakeup_source *ws;
226 	int ret;
227 
228 	ws = wakeup_source_create(name);
229 	if (ws) {
230 		if (!dev || device_is_registered(dev)) {
231 			ret = wakeup_source_sysfs_add(dev, ws);
232 			if (ret) {
233 				wakeup_source_free(ws);
234 				return NULL;
235 			}
236 		}
237 		wakeup_source_add(ws);
238 	}
239 	return ws;
240 }
241 EXPORT_SYMBOL_GPL(wakeup_source_register);
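/*
 * A minimal usage sketch (hypothetical caller, not part of this file): a
 * "virtual" wakeup source, i.e. one not backed by a struct device, can be
 * registered with a NULL device and driven directly:
 *
 *	static struct wakeup_source *my_ws;
 *
 *	my_ws = wakeup_source_register(NULL, "my_virtual_source");
 *	if (!my_ws)
 *		return -ENOMEM;
 *
 *	__pm_stay_awake(my_ws);		// open a "no suspend" period
 *	// ... handle the event ...
 *	__pm_relax(my_ws);		// close it again
 *
 *	wakeup_source_unregister(my_ws);
 */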
242 
243 /**
244  * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
245  * @ws: Wakeup source object to unregister.
246  */
247 void wakeup_source_unregister(struct wakeup_source *ws)
248 {
249 	if (ws) {
250 		wakeup_source_remove(ws);
251 		if (ws->dev)
252 			wakeup_source_sysfs_remove(ws);
253 
254 		wakeup_source_destroy(ws);
255 	}
256 }
257 EXPORT_SYMBOL_GPL(wakeup_source_unregister);
258 
259 /**
260  * wakeup_sources_read_lock - Lock wakeup source list for read.
261  *
262  * Returns an index of the SRCU read lock for wakeup_srcu.
263  * This index must be passed to the matching wakeup_sources_read_unlock().
264  */
265 int wakeup_sources_read_lock(void)
266 {
267 	return srcu_read_lock(&wakeup_srcu);
268 }
269 EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
270 
271 /**
272  * wakeup_sources_read_unlock - Unlock wakeup source list.
273  * @idx: return value from corresponding wakeup_sources_read_lock()
274  */
275 void wakeup_sources_read_unlock(int idx)
276 {
277 	srcu_read_unlock(&wakeup_srcu, idx);
278 }
279 EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
280 
281 /**
282  * wakeup_sources_walk_start - Begin a walk on wakeup source list
283  *
284  * Returns first object of the list of wakeup sources.
285  *
286  * Note that to be safe, the wakeup sources list needs to be locked by calling
287  * wakeup_sources_read_lock() for this.
288  */
289 struct wakeup_source *wakeup_sources_walk_start(void)
290 {
291 	struct list_head *ws_head = &wakeup_sources;
292 
293 	return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
294 }
295 EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
296 
297 /**
298  * wakeup_sources_walk_next - Get next wakeup source from the list
299  * @ws: Previous wakeup source object
300  *
301  * Note that to be safe, the wakeup sources list needs to be locked by calling
302  * wakeup_sources_read_lock() for this.
303  */
304 struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
305 {
306 	struct list_head *ws_head = &wakeup_sources;
307 
308 	return list_next_or_null_rcu(ws_head, &ws->entry,
309 				struct wakeup_source, entry);
310 }
311 EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
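/*
 * A minimal walk sketch (hypothetical caller): the SRCU read lock taken by
 * wakeup_sources_read_lock() must be held across the whole walk, e.g.:
 *
 *	struct wakeup_source *ws;
 *	int idx;
 *
 *	idx = wakeup_sources_read_lock();
 *	for (ws = wakeup_sources_walk_start(); ws;
 *	     ws = wakeup_sources_walk_next(ws))
 *		pr_info("wakeup source: %s\n", ws->name);
 *	wakeup_sources_read_unlock(idx);
 */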
312 
313 /**
314  * device_wakeup_attach - Attach a wakeup source object to a device object.
315  * @dev: Device to handle.
316  * @ws: Wakeup source object to attach to @dev.
317  *
318  * This causes @dev to be treated as a wakeup device.
319  */
320 static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
321 {
322 	spin_lock_irq(&dev->power.lock);
323 	if (dev->power.wakeup) {
324 		spin_unlock_irq(&dev->power.lock);
325 		return -EEXIST;
326 	}
327 	dev->power.wakeup = ws;
328 	if (dev->power.wakeirq)
329 		device_wakeup_attach_irq(dev, dev->power.wakeirq);
330 	spin_unlock_irq(&dev->power.lock);
331 	return 0;
332 }
333 
334 /**
335  * device_wakeup_enable - Enable given device to be a wakeup source.
336  * @dev: Device to handle.
337  *
338  * Create a wakeup source object, register it and attach it to @dev.
339  */
340 int device_wakeup_enable(struct device *dev)
341 {
342 	struct wakeup_source *ws;
343 	int ret;
344 
345 	if (!dev || !dev->power.can_wakeup)
346 		return -EINVAL;
347 
348 	if (pm_suspend_target_state != PM_SUSPEND_ON)
349 		dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
350 
351 	ws = wakeup_source_register(dev, dev_name(dev));
352 	if (!ws)
353 		return -ENOMEM;
354 
355 	ret = device_wakeup_attach(dev, ws);
356 	if (ret)
357 		wakeup_source_unregister(ws);
358 
359 	return ret;
360 }
361 EXPORT_SYMBOL_GPL(device_wakeup_enable);
362 
363 /**
364  * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
365  * @dev: Device to handle
366  * @wakeirq: Device specific wakeirq entry
367  *
368  * Attach a device wakeirq to the wakeup source so the device
369  * wake IRQ can be configured automatically for suspend and
370  * resume.
371  *
372  * Call under the device's power.lock lock.
373  */
374 void device_wakeup_attach_irq(struct device *dev,
375 			     struct wake_irq *wakeirq)
376 {
377 	struct wakeup_source *ws;
378 
379 	ws = dev->power.wakeup;
380 	if (!ws)
381 		return;
382 
383 	if (ws->wakeirq)
384 		dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
385 
386 	ws->wakeirq = wakeirq;
387 }
388 
389 /**
390  * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
391  * @dev: Device to handle
392  *
393  * Removes a device wakeirq from the wakeup source.
394  *
395  * Call under the device's power.lock lock.
396  */
397 void device_wakeup_detach_irq(struct device *dev)
398 {
399 	struct wakeup_source *ws;
400 
401 	ws = dev->power.wakeup;
402 	if (ws)
403 		ws->wakeirq = NULL;
404 }
405 
406 /**
407  * device_wakeup_arm_wake_irqs(void)
408  *
409  * Iterates over the list of device wakeirqs to arm them.
410  */
411 void device_wakeup_arm_wake_irqs(void)
412 {
413 	struct wakeup_source *ws;
414 	int srcuidx;
415 
416 	srcuidx = srcu_read_lock(&wakeup_srcu);
417 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
418 		dev_pm_arm_wake_irq(ws->wakeirq);
419 	srcu_read_unlock(&wakeup_srcu, srcuidx);
420 }
421 
422 /**
423  * device_wakeup_disarm_wake_irqs(void)
424  *
425  * Iterates over the list of device wakeirqs to disarm them.
426  */
427 void device_wakeup_disarm_wake_irqs(void)
428 {
429 	struct wakeup_source *ws;
430 	int srcuidx;
431 
432 	srcuidx = srcu_read_lock(&wakeup_srcu);
433 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
434 		dev_pm_disarm_wake_irq(ws->wakeirq);
435 	srcu_read_unlock(&wakeup_srcu, srcuidx);
436 }
437 
438 /**
439  * device_wakeup_detach - Detach a device's wakeup source object from it.
440  * @dev: Device to detach the wakeup source object from.
441  *
442  * After it returns, @dev will not be treated as a wakeup device any more.
443  */
444 static struct wakeup_source *device_wakeup_detach(struct device *dev)
445 {
446 	struct wakeup_source *ws;
447 
448 	spin_lock_irq(&dev->power.lock);
449 	ws = dev->power.wakeup;
450 	dev->power.wakeup = NULL;
451 	spin_unlock_irq(&dev->power.lock);
452 	return ws;
453 }
454 
455 /**
456  * device_wakeup_disable - Do not regard a device as a wakeup source any more.
457  * @dev: Device to handle.
458  *
459  * Detach the @dev's wakeup source object from it, unregister this wakeup source
460  * object and destroy it.
461  */
462 int device_wakeup_disable(struct device *dev)
463 {
464 	struct wakeup_source *ws;
465 
466 	if (!dev || !dev->power.can_wakeup)
467 		return -EINVAL;
468 
469 	ws = device_wakeup_detach(dev);
470 	wakeup_source_unregister(ws);
471 	return 0;
472 }
473 EXPORT_SYMBOL_GPL(device_wakeup_disable);
474 
475 /**
476  * device_set_wakeup_capable - Set/reset device wakeup capability flag.
477  * @dev: Device to handle.
478  * @capable: Whether or not @dev is capable of waking up the system from sleep.
479  *
480  * If @capable is set, set the @dev's power.can_wakeup flag and add its
481  * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
482  * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
483  *
484  * This function may sleep and it can't be called from any context where
485  * sleeping is not allowed.
486  */
487 void device_set_wakeup_capable(struct device *dev, bool capable)
488 {
489 	if (!!dev->power.can_wakeup == !!capable)
490 		return;
491 
492 	dev->power.can_wakeup = capable;
493 	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
494 		if (capable) {
495 			int ret = wakeup_sysfs_add(dev);
496 
497 			if (ret)
498 				dev_info(dev, "Wakeup sysfs attributes not added\n");
499 		} else {
500 			wakeup_sysfs_remove(dev);
501 		}
502 	}
503 }
504 EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
505 
506 /**
507  * device_init_wakeup - Device wakeup initialization.
508  * @dev: Device to handle.
509  * @enable: Whether or not to enable @dev as a wakeup device.
510  *
511  * By default, most devices should leave wakeup disabled.  The exceptions are
512  * devices that everyone expects to be wakeup sources: keyboards, power buttons,
513  * possibly network interfaces, etc.  Also, devices that don't generate their
514  * own wakeup requests but merely forward requests from one bus to another
515  * (like PCI bridges) should have wakeup enabled by default.
516  */
517 int device_init_wakeup(struct device *dev, bool enable)
518 {
519 	int ret = 0;
520 
521 	if (!dev)
522 		return -EINVAL;
523 
524 	if (enable) {
525 		device_set_wakeup_capable(dev, true);
526 		ret = device_wakeup_enable(dev);
527 	} else {
528 		device_wakeup_disable(dev);
529 		device_set_wakeup_capable(dev, false);
530 	}
531 
532 	return ret;
533 }
534 EXPORT_SYMBOL_GPL(device_init_wakeup);
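/*
 * A minimal driver-side sketch (hypothetical code, not part of this file):
 * enable wakeup in probe, then honour the user-visible setting in the
 * suspend callback before arming the wake mechanism:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_wake_irq);	// my_wake_irq: driver's wake IRQ (assumed)
 *		return 0;
 *	}
 *
 * device_may_wakeup() comes from linux/pm_wakeup.h and enable_irq_wake()
 * from linux/interrupt.h.
 */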
535 
536 /**
537  * device_set_wakeup_enable - Enable or disable a device to wake up the system.
538  * @dev: Device to handle.
539  */
540 int device_set_wakeup_enable(struct device *dev, bool enable)
541 {
542 	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
543 }
544 EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
545 
546 /**
547  * wakeup_source_not_registered - validate the given wakeup source.
548  * @ws: Wakeup source to be validated.
549  */
550 static bool wakeup_source_not_registered(struct wakeup_source *ws)
551 {
552 	/*
553 	 * Use timer struct to check if the given source is initialized
554 	 * by wakeup_source_add.
555 	 */
556 	return ws->timer.function != pm_wakeup_timer_fn;
557 }
558 
559 /*
560  * The functions below use the observation that each wakeup event starts a
561  * period in which the system should not be suspended.  The moment this period
562  * will end depends on how the wakeup event is going to be processed after being
563  * detected and all of the possible cases can be divided into two distinct
564  * groups.
565  *
566  * First, a wakeup event may be detected by the same functional unit that will
567  * carry out the entire processing of it and possibly will pass it to user space
568  * for further processing.  In that case the functional unit that has detected
569  * the event may later "close" the "no suspend" period associated with it
570  * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
571  * pm_relax(), balanced with each other, is supposed to be used in such
572  * situations.
573  *
574  * Second, a wakeup event may be detected by one functional unit and processed
575  * by another one.  In that case the unit that has detected it cannot really
576  * "close" the "no suspend" period associated with it, unless it knows in
577  * advance what's going to happen to the event during processing.  This
578  * knowledge, however, may not be available to it, so it can simply specify time
579  * to wait before the system can be suspended and pass it as the second
580  * argument of pm_wakeup_event().
581  *
582  * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
583  * "no suspend" period will be ended either by the pm_relax(), or by the timer
584  * function executed when the timer expires, whichever comes first.
585  */
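/*
 * A minimal sketch of the two cases above (hypothetical driver code):
 *
 * 1) The code that detects the event also finishes processing it, so it can
 *    close the "no suspend" period itself:
 *
 *	pm_stay_awake(dev);
 *	// ... process the event completely ...
 *	pm_relax(dev);
 *
 * 2) The event is handed off elsewhere and only an upper bound on the
 *    processing time is known, so a timer closes the period instead:
 *
 *	pm_wakeup_event(dev, 200);	// keep the system awake for ~200 ms
 *
 * pm_wakeup_event() is the inline wrapper around pm_wakeup_dev_event()
 * declared in include/linux/pm_wakeup.h.
 */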
586 
587 /**
588  * wakeup_source_activate - Mark given wakeup source as active.
589  * @ws: Wakeup source to handle.
590  *
591  * Update the @ws' statistics and, if @ws has just been activated, notify the PM
592  * core of the event by incrementing the counter of wakeup events being
593  * processed.
594  */
595 static void wakeup_source_activate(struct wakeup_source *ws)
596 {
597 	unsigned int cec;
598 
599 	if (WARN_ONCE(wakeup_source_not_registered(ws),
600 			"unregistered wakeup source\n"))
601 		return;
602 
603 	ws->active = true;
604 	ws->active_count++;
605 	ws->last_time = ktime_get();
606 	if (ws->autosleep_enabled)
607 		ws->start_prevent_time = ws->last_time;
608 
609 	/* Increment the counter of events in progress. */
610 	cec = atomic_inc_return(&combined_event_count);
611 
612 	trace_wakeup_source_activate(ws->name, cec);
613 }
614 
615 /**
616  * wakeup_source_report_event - Report wakeup event using the given source.
617  * @ws: Wakeup source to report the event for.
618  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
619  */
620 static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
621 {
622 	ws->event_count++;
623 	/* This is racy, but the counter is approximate anyway. */
624 	if (events_check_enabled)
625 		ws->wakeup_count++;
626 
627 	if (!ws->active)
628 		wakeup_source_activate(ws);
629 
630 	if (hard)
631 		pm_system_wakeup();
632 }
633 
634 /**
635  * __pm_stay_awake - Notify the PM core of a wakeup event.
636  * @ws: Wakeup source object associated with the source of the event.
637  *
638  * It is safe to call this function from interrupt context.
639  */
640 void __pm_stay_awake(struct wakeup_source *ws)
641 {
642 	unsigned long flags;
643 
644 	if (!ws)
645 		return;
646 
647 	spin_lock_irqsave(&ws->lock, flags);
648 
649 	wakeup_source_report_event(ws, false);
650 	del_timer(&ws->timer);
651 	ws->timer_expires = 0;
652 
653 	spin_unlock_irqrestore(&ws->lock, flags);
654 }
655 EXPORT_SYMBOL_GPL(__pm_stay_awake);
656 
657 /**
658  * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
659  * @dev: Device the wakeup event is related to.
660  *
661  * Notify the PM core of a wakeup event (signaled by @dev) by calling
662  * __pm_stay_awake for the @dev's wakeup source object.
663  *
664  * Call this function after detecting of a wakeup event if pm_relax() is going
665  * to be called directly after processing the event (and possibly passing it to
666  * user space for further processing).
667  */
668 void pm_stay_awake(struct device *dev)
669 {
670 	unsigned long flags;
671 
672 	if (!dev)
673 		return;
674 
675 	spin_lock_irqsave(&dev->power.lock, flags);
676 	__pm_stay_awake(dev->power.wakeup);
677 	spin_unlock_irqrestore(&dev->power.lock, flags);
678 }
679 EXPORT_SYMBOL_GPL(pm_stay_awake);
680 
681 #ifdef CONFIG_PM_AUTOSLEEP
682 static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
683 {
684 	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
685 	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
686 }
687 #else
688 static inline void update_prevent_sleep_time(struct wakeup_source *ws,
689 					     ktime_t now) {}
690 #endif
691 
692 /**
693  * wakeup_source_deactivate - Mark given wakeup source as inactive.
694  * @ws: Wakeup source to handle.
695  *
696  * Update the @ws' statistics and notify the PM core that the wakeup source has
697  * become inactive by decrementing the counter of wakeup events being processed
698  * and incrementing the counter of registered wakeup events.
699  */
700 static void wakeup_source_deactivate(struct wakeup_source *ws)
701 {
702 	unsigned int cnt, inpr, cec;
703 	ktime_t duration;
704 	ktime_t now;
705 
706 	ws->relax_count++;
707 	/*
708 	 * __pm_relax() may be called directly or from a timer function.
709 	 * If it is called directly right after the timer function has been
710 	 * started, but before the timer function calls __pm_relax(), it is
711 	 * possible that __pm_stay_awake() will be called in the meantime and
712 	 * will set ws->active.  Then, ws->active may be cleared immediately
713 	 * by the __pm_relax() called from the timer function, but in such a
714 	 * case ws->relax_count will be different from ws->active_count.
715 	 */
716 	if (ws->relax_count != ws->active_count) {
717 		ws->relax_count--;
718 		return;
719 	}
720 
721 	ws->active = false;
722 
723 	now = ktime_get();
724 	duration = ktime_sub(now, ws->last_time);
725 	ws->total_time = ktime_add(ws->total_time, duration);
726 	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
727 		ws->max_time = duration;
728 
729 	ws->last_time = now;
730 	del_timer(&ws->timer);
731 	ws->timer_expires = 0;
732 
733 	if (ws->autosleep_enabled)
734 		update_prevent_sleep_time(ws, now);
735 
736 	/*
737 	 * Increment the counter of registered wakeup events and decrement the
738  * counter of wakeup events in progress simultaneously.
739 	 */
740 	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
741 	trace_wakeup_source_deactivate(ws->name, cec);
742 
743 	split_counters(&cnt, &inpr);
744 	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
745 		wake_up(&wakeup_count_wait_queue);
746 }
747 
748 /**
749  * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
750  * @ws: Wakeup source object associated with the source of the event.
751  *
752  * Call this function for wakeup events whose processing started with calling
753  * __pm_stay_awake().
754  *
755  * It is safe to call it from interrupt context.
756  */
757 void __pm_relax(struct wakeup_source *ws)
758 {
759 	unsigned long flags;
760 
761 	if (!ws)
762 		return;
763 
764 	spin_lock_irqsave(&ws->lock, flags);
765 	if (ws->active)
766 		wakeup_source_deactivate(ws);
767 	spin_unlock_irqrestore(&ws->lock, flags);
768 }
769 EXPORT_SYMBOL_GPL(__pm_relax);
770 
771 /**
772  * pm_relax - Notify the PM core that processing of a wakeup event has ended.
773  * @dev: Device that signaled the event.
774  *
775  * Execute __pm_relax() for the @dev's wakeup source object.
776  */
777 void pm_relax(struct device *dev)
778 {
779 	unsigned long flags;
780 
781 	if (!dev)
782 		return;
783 
784 	spin_lock_irqsave(&dev->power.lock, flags);
785 	__pm_relax(dev->power.wakeup);
786 	spin_unlock_irqrestore(&dev->power.lock, flags);
787 }
788 EXPORT_SYMBOL_GPL(pm_relax);
789 
790 /**
791  * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
792  * @t: Timer embedded in the wakeup source object associated with the event source.
793  *
794  * Call wakeup_source_deactivate() for the wakeup source containing @t if it is
795  * currently active, its timer has not been canceled and the expiration time of
796  * the timer is not in the future.
797  */
798 static void pm_wakeup_timer_fn(struct timer_list *t)
799 {
800 	struct wakeup_source *ws = from_timer(ws, t, timer);
801 	unsigned long flags;
802 
803 	spin_lock_irqsave(&ws->lock, flags);
804 
805 	if (ws->active && ws->timer_expires
806 	    && time_after_eq(jiffies, ws->timer_expires)) {
807 		wakeup_source_deactivate(ws);
808 		ws->expire_count++;
809 	}
810 
811 	spin_unlock_irqrestore(&ws->lock, flags);
812 }
813 
814 /**
815  * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
816  * @ws: Wakeup source object associated with the event source.
817  * @msec: Anticipated event processing time (in milliseconds).
818  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
819  *
820  * Notify the PM core of a wakeup event whose source is @ws that will take
821  * approximately @msec milliseconds to be processed by the kernel.  If @ws is
822  * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
823  * execute pm_wakeup_timer_fn() in future.
824  *
825  * It is safe to call this function from interrupt context.
826  */
827 void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
828 {
829 	unsigned long flags;
830 	unsigned long expires;
831 
832 	if (!ws)
833 		return;
834 
835 	spin_lock_irqsave(&ws->lock, flags);
836 
837 	wakeup_source_report_event(ws, hard);
838 
839 	if (!msec) {
840 		wakeup_source_deactivate(ws);
841 		goto unlock;
842 	}
843 
844 	expires = jiffies + msecs_to_jiffies(msec);
845 	if (!expires)
846 		expires = 1;
847 
848 	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
849 		mod_timer(&ws->timer, expires);
850 		ws->timer_expires = expires;
851 	}
852 
853  unlock:
854 	spin_unlock_irqrestore(&ws->lock, flags);
855 }
856 EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
857 
858 /**
859  * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
860  * @dev: Device the wakeup event is related to.
861  * @msec: Anticipated event processing time (in milliseconds).
862  * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
863  *
864  * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
865  */
866 void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
867 {
868 	unsigned long flags;
869 
870 	if (!dev)
871 		return;
872 
873 	spin_lock_irqsave(&dev->power.lock, flags);
874 	pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
875 	spin_unlock_irqrestore(&dev->power.lock, flags);
876 }
877 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
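/*
 * Most callers use the inline wrappers declared in include/linux/pm_wakeup.h
 * rather than calling this directly: pm_wakeup_event(dev, msec) passes
 * @hard = false, while pm_wakeup_hard_event(dev) passes @msec = 0 and
 * @hard = true so that suspends in progress are aborted as well.
 */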
878 
879 void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
880 {
881 	struct wakeup_source *ws, *last_active_ws = NULL;
882 	int len = 0;
883 	bool active = false;
884 
885 	rcu_read_lock();
886 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
887 		if (ws->active && len < max) {
888 			if (!active)
889 				len += scnprintf(pending_wakeup_source, max,
890 						"Pending Wakeup Sources: ");
891 			len += scnprintf(pending_wakeup_source + len, max - len,
892 				"%s ", ws->name);
893 			active = true;
894 		} else if (!active &&
895 			   (!last_active_ws ||
896 			    ktime_to_ns(ws->last_time) >
897 			    ktime_to_ns(last_active_ws->last_time))) {
898 			last_active_ws = ws;
899 		}
900 	}
901 	if (!active && last_active_ws) {
902 		scnprintf(pending_wakeup_source, max,
903 				"Last active Wakeup Source: %s",
904 				last_active_ws->name);
905 	}
906 	rcu_read_unlock();
907 }
908 EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
909 
910 void pm_print_active_wakeup_sources(void)
911 {
912 	struct wakeup_source *ws;
913 	int srcuidx, active = 0;
914 	struct wakeup_source *last_activity_ws = NULL;
915 
916 	srcuidx = srcu_read_lock(&wakeup_srcu);
917 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
918 		if (ws->active) {
919 			pm_pr_dbg("active wakeup source: %s\n", ws->name);
920 			active = 1;
921 		} else if (!active &&
922 			   (!last_activity_ws ||
923 			    ktime_to_ns(ws->last_time) >
924 			    ktime_to_ns(last_activity_ws->last_time))) {
925 			last_activity_ws = ws;
926 		}
927 	}
928 
929 	if (!active && last_activity_ws)
930 		pm_pr_dbg("last active wakeup source: %s\n",
931 			last_activity_ws->name);
932 	srcu_read_unlock(&wakeup_srcu, srcuidx);
933 }
934 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
935 
936 /**
937  * pm_wakeup_pending - Check if power transition in progress should be aborted.
938  *
939  * Compare the current number of registered wakeup events with its preserved
940  * value from the past and return true if new wakeup events have been registered
941  * since the old value was stored.  Also return true if the current number of
942  * wakeup events being processed is different from zero.
943  */
944 bool pm_wakeup_pending(void)
945 {
946 	unsigned long flags;
947 	bool ret = false;
948 	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
949 
950 	raw_spin_lock_irqsave(&events_lock, flags);
951 	if (events_check_enabled) {
952 		unsigned int cnt, inpr;
953 
954 		split_counters(&cnt, &inpr);
955 		ret = (cnt != saved_count || inpr > 0);
956 		events_check_enabled = !ret;
957 	}
958 	raw_spin_unlock_irqrestore(&events_lock, flags);
959 
960 	if (ret) {
961 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
962 		pm_print_active_wakeup_sources();
963 		pm_get_active_wakeup_sources(suspend_abort,
964 					     MAX_SUSPEND_ABORT_LEN);
965 		log_suspend_abort_reason(suspend_abort);
966 		pr_info("PM: %s\n", suspend_abort);
967 	}
968 
969 	return ret || atomic_read(&pm_abort_suspend) > 0;
970 }
971 
972 void pm_system_wakeup(void)
973 {
974 	atomic_inc(&pm_abort_suspend);
975 	s2idle_wake();
976 }
977 EXPORT_SYMBOL_GPL(pm_system_wakeup);
978 
979 void pm_system_cancel_wakeup(void)
980 {
981 	atomic_dec_if_positive(&pm_abort_suspend);
982 }
983 
984 void pm_wakeup_clear(unsigned int irq_number)
985 {
986 	raw_spin_lock_irq(&wakeup_irq_lock);
987 
988 	if (irq_number && wakeup_irq[0] == irq_number)
989 		wakeup_irq[0] = wakeup_irq[1];
990 	else
991 		wakeup_irq[0] = 0;
992 
993 	wakeup_irq[1] = 0;
994 
995 	raw_spin_unlock_irq(&wakeup_irq_lock);
996 
997 	if (!irq_number)
998 		atomic_set(&pm_abort_suspend, 0);
999 }
1000 
1001 void pm_system_irq_wakeup(unsigned int irq_number)
1002 {
1003 	unsigned long flags;
1004 
1005 	raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
1006 
1007 	if (wakeup_irq[0] == 0)
1008 		wakeup_irq[0] = irq_number;
1009 	else if (wakeup_irq[1] == 0)
1010 		wakeup_irq[1] = irq_number;
1011 	else
1012 		irq_number = 0;
1013 
1014 	raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
1015 
1016 	if (irq_number) {
1017 		struct irq_desc *desc;
1018 		const char *name = "null";
1019 
1020 		desc = irq_to_desc(irq_number);
1021 		if (desc == NULL)
1022 			name = "stray irq";
1023 		else if (desc->action && desc->action->name)
1024 			name = desc->action->name;
1025 
1026 		log_irq_wakeup_reason(irq_number);
1027 		pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
1028 		pm_system_wakeup();
1029 	}
1030 }
1031 
1032 unsigned int pm_wakeup_irq(void)
1033 {
1034 	return wakeup_irq[0];
1035 }
1036 
1037 /**
1038  * pm_get_wakeup_count - Read the number of registered wakeup events.
1039  * @count: Address to store the value at.
1040  * @block: Whether or not to block.
1041  *
1042  * Store the number of registered wakeup events at the address in @count.  If
1043  * @block is set, block until the current number of wakeup events being
1044  * processed is zero.
1045  *
1046  * Return 'false' if the current number of wakeup events being processed is
1047  * nonzero.  Otherwise return 'true'.
1048  */
1049 bool pm_get_wakeup_count(unsigned int *count, bool block)
1050 {
1051 	unsigned int cnt, inpr;
1052 
1053 	if (block) {
1054 		DEFINE_WAIT(wait);
1055 
1056 		for (;;) {
1057 			prepare_to_wait(&wakeup_count_wait_queue, &wait,
1058 					TASK_INTERRUPTIBLE);
1059 			split_counters(&cnt, &inpr);
1060 			if (inpr == 0 || signal_pending(current))
1061 				break;
1062 			pm_print_active_wakeup_sources();
1063 			schedule();
1064 		}
1065 		finish_wait(&wakeup_count_wait_queue, &wait);
1066 	}
1067 
1068 	split_counters(&cnt, &inpr);
1069 	*count = cnt;
1070 	return !inpr;
1071 }
1072 
1073 /**
1074  * pm_save_wakeup_count - Save the current number of registered wakeup events.
1075  * @count: Value to compare with the current number of registered wakeup events.
1076  *
1077  * If @count is equal to the current number of registered wakeup events and the
1078  * current number of wakeup events being processed is zero, store @count as the
1079  * old number of registered wakeup events for pm_wakeup_pending(), enable
1080  * wakeup events detection and return 'true'.  Otherwise disable wakeup events
1081  * detection and return 'false'.
1082  */
1083 bool pm_save_wakeup_count(unsigned int count)
1084 {
1085 	unsigned int cnt, inpr;
1086 	unsigned long flags;
1087 
1088 	events_check_enabled = false;
1089 	raw_spin_lock_irqsave(&events_lock, flags);
1090 	split_counters(&cnt, &inpr);
1091 	if (cnt == count && inpr == 0) {
1092 		saved_count = count;
1093 		events_check_enabled = true;
1094 	}
1095 	raw_spin_unlock_irqrestore(&events_lock, flags);
1096 	return events_check_enabled;
1097 }
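/*
 * pm_get_wakeup_count() and pm_save_wakeup_count() back the
 * /sys/power/wakeup_count interface (see kernel/power/main.c): reading the
 * file calls pm_get_wakeup_count() with @block set, and writing it calls
 * pm_save_wakeup_count().  User space wanting a race-free suspend reads
 * wakeup_count, writes the value back to the same file (the write fails if
 * new wakeup events arrived in between), and only then writes to
 * /sys/power/state.
 */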
1098 
1099 #ifdef CONFIG_PM_AUTOSLEEP
1100 /**
1101  * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
1102  * @set: Whether to set or to clear the autosleep_enabled flags.
1103  */
1104 void pm_wakep_autosleep_enabled(bool set)
1105 {
1106 	struct wakeup_source *ws;
1107 	ktime_t now = ktime_get();
1108 	int srcuidx;
1109 
1110 	srcuidx = srcu_read_lock(&wakeup_srcu);
1111 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1112 		spin_lock_irq(&ws->lock);
1113 		if (ws->autosleep_enabled != set) {
1114 			ws->autosleep_enabled = set;
1115 			if (ws->active) {
1116 				if (set)
1117 					ws->start_prevent_time = now;
1118 				else
1119 					update_prevent_sleep_time(ws, now);
1120 			}
1121 		}
1122 		spin_unlock_irq(&ws->lock);
1123 	}
1124 	srcu_read_unlock(&wakeup_srcu, srcuidx);
1125 }
1126 #endif /* CONFIG_PM_AUTOSLEEP */
1127 
1128 /**
1129  * print_wakeup_source_stats - Print wakeup source statistics information.
1130  * @m: seq_file to print the statistics into.
1131  * @ws: Wakeup source object to print the statistics for.
1132  */
1133 static int print_wakeup_source_stats(struct seq_file *m,
1134 				     struct wakeup_source *ws)
1135 {
1136 	unsigned long flags;
1137 	ktime_t total_time;
1138 	ktime_t max_time;
1139 	unsigned long active_count;
1140 	ktime_t active_time;
1141 	ktime_t prevent_sleep_time;
1142 
1143 	spin_lock_irqsave(&ws->lock, flags);
1144 
1145 	total_time = ws->total_time;
1146 	max_time = ws->max_time;
1147 	prevent_sleep_time = ws->prevent_sleep_time;
1148 	active_count = ws->active_count;
1149 	if (ws->active) {
1150 		ktime_t now = ktime_get();
1151 
1152 		active_time = ktime_sub(now, ws->last_time);
1153 		total_time = ktime_add(total_time, active_time);
1154 		if (active_time > max_time)
1155 			max_time = active_time;
1156 
1157 		if (ws->autosleep_enabled)
1158 			prevent_sleep_time = ktime_add(prevent_sleep_time,
1159 				ktime_sub(now, ws->start_prevent_time));
1160 	} else {
1161 		active_time = 0;
1162 	}
1163 
1164 	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
1165 		   ws->name, active_count, ws->event_count,
1166 		   ws->wakeup_count, ws->expire_count,
1167 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
1168 		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
1169 		   ktime_to_ms(prevent_sleep_time));
1170 
1171 	spin_unlock_irqrestore(&ws->lock, flags);
1172 
1173 	return 0;
1174 }
1175 
1176 static void *wakeup_sources_stats_seq_start(struct seq_file *m,
1177 					loff_t *pos)
1178 {
1179 	struct wakeup_source *ws;
1180 	loff_t n = *pos;
1181 	int *srcuidx = m->private;
1182 
1183 	if (n == 0) {
1184 		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
1185 			"expire_count\tactive_since\ttotal_time\tmax_time\t"
1186 			"last_change\tprevent_suspend_time\n");
1187 	}
1188 
1189 	*srcuidx = srcu_read_lock(&wakeup_srcu);
1190 	list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
1191 		if (n-- <= 0)
1192 			return ws;
1193 	}
1194 
1195 	return NULL;
1196 }
1197 
1198 static void *wakeup_sources_stats_seq_next(struct seq_file *m,
1199 					void *v, loff_t *pos)
1200 {
1201 	struct wakeup_source *ws = v;
1202 	struct wakeup_source *next_ws = NULL;
1203 
1204 	++(*pos);
1205 
1206 	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
1207 		next_ws = ws;
1208 		break;
1209 	}
1210 
1211 	if (!next_ws)
1212 		print_wakeup_source_stats(m, &deleted_ws);
1213 
1214 	return next_ws;
1215 }
1216 
1217 static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
1218 {
1219 	int *srcuidx = m->private;
1220 
1221 	srcu_read_unlock(&wakeup_srcu, *srcuidx);
1222 }
1223 
1224 /**
1225  * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
1226  * @m: seq_file to print the statistics into.
1227  * @v: wakeup_source of each iteration
1228  */
1229 static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
1230 {
1231 	struct wakeup_source *ws = v;
1232 
1233 	print_wakeup_source_stats(m, ws);
1234 
1235 	return 0;
1236 }
1237 
1238 static const struct seq_operations wakeup_sources_stats_seq_ops = {
1239 	.start = wakeup_sources_stats_seq_start,
1240 	.next  = wakeup_sources_stats_seq_next,
1241 	.stop  = wakeup_sources_stats_seq_stop,
1242 	.show  = wakeup_sources_stats_seq_show,
1243 };
1244 
1245 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
1246 {
1247 	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
1248 }
1249 
1250 static const struct file_operations wakeup_sources_stats_fops = {
1251 	.owner = THIS_MODULE,
1252 	.open = wakeup_sources_stats_open,
1253 	.read = seq_read,
1254 	.llseek = seq_lseek,
1255 	.release = seq_release_private,
1256 };
1257 
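/*
 * The debugfs file created below appears as <debugfs>/wakeup_sources and
 * prints one line per wakeup source (plus the "deleted" accumulator), with
 * the columns emitted by wakeup_sources_stats_seq_start(): name,
 * active_count, event_count, wakeup_count, expire_count, active_since,
 * total_time, max_time, last_change and prevent_suspend_time (times in
 * milliseconds).
 */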
1258 static int __init wakeup_sources_debugfs_init(void)
1259 {
1260 	debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
1261 			    &wakeup_sources_stats_fops);
1262 	return 0;
1263 }
1264 
1265 postcore_initcall(wakeup_sources_debugfs_init);
1266