/* xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/intel_wakeref.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}
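
/*
 * Note: intel_runtime_pm_get() returns an opaque cookie (used for leak
 * tracking when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled) that must be
 * handed back to intel_runtime_pm_put(). fetch_and_zero() in rpm_put()
 * both consumes that cookie and marks the wakeref inactive, which is the
 * state intel_wakeref_is_active() observes. A rough sketch of the bare
 * pattern these helpers wrap (illustrative only, not part of this file):
 *
 *	intel_wakeref_t cookie = intel_runtime_pm_get(rpm);
 *	...				// device guaranteed awake here
 *	intel_runtime_pm_put(rpm, cookie);
 */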

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}
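
/*
 * __intel_wakeref_get_first() is only the slow path. The expected entry
 * point is the inline fast path in intel_wakeref.h, which avoids the mutex
 * entirely while the wakeref is already held; paraphrased from that header:
 *
 *	static inline int intel_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		might_sleep();
 *		if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *			return __intel_wakeref_get_first(wf);
 *		return 0;
 *	}
 *
 * atomic_inc_not_zero() pairs with the smp_mb__before_atomic() above: any
 * thread that observes a non-zero count is also guaranteed to observe the
 * side effects of ops->get().
 */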

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(system_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}
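
/*
 * As on the get side, the functions above are only the slow path. The
 * inline fast path in intel_wakeref.h reads roughly as follows
 * (paraphrased):
 *
 *	static inline void
 *	__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
 *	{
 *		INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 *		if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *			__intel_wakeref_put_last(wf, flags);
 *	}
 *
 * Intermediate puts are thus a single atomic decrement; only the final
 * 1 -> 0 transition funnels into the locked release path above.
 */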

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	/* Drop our reference unless it is the last one remaining */
	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	/* The final put must run the release callback under the mutex */
	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);
}
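
/*
 * Callers normally reach __intel_wakeref_init() through the
 * intel_wakeref_init() wrapper macro in intel_wakeref.h, which supplies a
 * static lock class per call site so lockdep can tell different wakeref
 * users apart. Roughly (paraphrased from the header):
 *
 *	#define intel_wakeref_init(wf, rpm, ops) do {			\
 *		static struct intel_wakeref_lockclass __key;		\
 *									\
 *		__intel_wakeref_init((wf), (rpm), (ops), &__key);	\
 *	} while (0)
 */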

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}
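
/*
 * intel_wakeref_wait_for_idle() sleeps until the final put has released
 * the hardware (the wake_up_var() in ____intel_wakeref_put_last() above is
 * the matching wakeup). A hypothetical caller flushing activity before
 * suspend might look like:
 *
 *	err = intel_wakeref_wait_for_idle(&engine->wakeref);
 *	if (err)
 *		return err;	// interrupted by a fatal signal
 *
 * The wait is killable rather than uninterruptible, so a device that never
 * idles cannot leave the waiting process unkillable.
 */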

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* We only ever extend an already active wakeref here, never acquire one */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
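
/*
 * An illustrative (hypothetical) caller: a fault handler can use
 * intel_wakeref_auto() to hold the device awake for a grace period after
 * each fault, with repeated calls simply pushing the timer out:
 *
 *	intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 *			   msecs_to_jiffies(250));
 *
 * A timeout of 0 cancels any pending grace period synchronously, which is
 * how intel_wakeref_auto_fini() below tears the timer down.
 */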

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}