1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Tegra host1x Interrupt Management
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2010-2013, NVIDIA Corporation.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/slab.h>
11*4882a593Smuzhiyun #include <linux/irq.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <trace/events/host1x.h>
14*4882a593Smuzhiyun #include "channel.h"
15*4882a593Smuzhiyun #include "dev.h"
16*4882a593Smuzhiyun #include "intr.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /* Wait list management */
19*4882a593Smuzhiyun
/*
 * Lifecycle states of a waiter. Transitions are performed with atomic
 * operations so the completion path and the cancellation path
 * (host1x_intr_put_ref()) can race safely.
 */
enum waitlist_state {
	WLS_PENDING,	/* queued on a syncpt, threshold not yet reached */
	WLS_REMOVED,	/* taken off the queue by the completion path */
	WLS_CANCELLED,	/* cancelled via host1x_intr_put_ref() */
	WLS_HANDLED	/* fully processed; final reference may be dropped */
};
26*4882a593Smuzhiyun
waiter_release(struct kref * kref)27*4882a593Smuzhiyun static void waiter_release(struct kref *kref)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun kfree(container_of(kref, struct host1x_waitlist, refcount));
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * add a waiter to a waiter queue, sorted by threshold
34*4882a593Smuzhiyun * returns true if it was added at the head of the queue
35*4882a593Smuzhiyun */
add_waiter_to_queue(struct host1x_waitlist * waiter,struct list_head * queue)36*4882a593Smuzhiyun static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
37*4882a593Smuzhiyun struct list_head *queue)
38*4882a593Smuzhiyun {
39*4882a593Smuzhiyun struct host1x_waitlist *pos;
40*4882a593Smuzhiyun u32 thresh = waiter->thresh;
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun list_for_each_entry_reverse(pos, queue, list)
43*4882a593Smuzhiyun if ((s32)(pos->thresh - thresh) <= 0) {
44*4882a593Smuzhiyun list_add(&waiter->list, &pos->list);
45*4882a593Smuzhiyun return false;
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun list_add(&waiter->list, queue);
49*4882a593Smuzhiyun return true;
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun
/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		/* queue is sorted by threshold: stop at first incomplete */
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		/* per-action destination list */
		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				/* same channel: fold into previous waiter */
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			/* cancelled or consolidated: drop it here */
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}
87*4882a593Smuzhiyun
reset_threshold_interrupt(struct host1x * host,struct list_head * head,unsigned int id)88*4882a593Smuzhiyun static void reset_threshold_interrupt(struct host1x *host,
89*4882a593Smuzhiyun struct list_head *head,
90*4882a593Smuzhiyun unsigned int id)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun u32 thresh =
93*4882a593Smuzhiyun list_first_entry(head, struct host1x_waitlist, list)->thresh;
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
96*4882a593Smuzhiyun host1x_hw_intr_enable_syncpt_intr(host, id);
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
action_submit_complete(struct host1x_waitlist * waiter)99*4882a593Smuzhiyun static void action_submit_complete(struct host1x_waitlist *waiter)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun struct host1x_channel *channel = waiter->data;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun host1x_cdma_update(&channel->cdma);
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun /* Add nr_completed to trace */
106*4882a593Smuzhiyun trace_host1x_channel_submit_complete(dev_name(channel->dev),
107*4882a593Smuzhiyun waiter->count, waiter->thresh);
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
action_wakeup(struct host1x_waitlist * waiter)110*4882a593Smuzhiyun static void action_wakeup(struct host1x_waitlist *waiter)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun wait_queue_head_t *wq = waiter->data;
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun wake_up(wq);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
action_wakeup_interruptible(struct host1x_waitlist * waiter)117*4882a593Smuzhiyun static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun wait_queue_head_t *wq = waiter->data;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun wake_up_interruptible(wq);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
/* Handler invoked for a waiter once its threshold has been reached. */
typedef void (*action_handler)(struct host1x_waitlist *waiter);

/* Indexed by waiter->action; order must match enum host1x_intr_action. */
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};
131*4882a593Smuzhiyun
run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])132*4882a593Smuzhiyun static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun struct list_head *head = completed;
135*4882a593Smuzhiyun unsigned int i;
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
138*4882a593Smuzhiyun action_handler handler = action_handlers[i];
139*4882a593Smuzhiyun struct host1x_waitlist *waiter, *next;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun list_for_each_entry_safe(waiter, next, head, list) {
142*4882a593Smuzhiyun list_del(&waiter->list);
143*4882a593Smuzhiyun handler(waiter);
144*4882a593Smuzhiyun WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
145*4882a593Smuzhiyun WLS_REMOVED);
146*4882a593Smuzhiyun kref_put(&waiter->refcount, waiter_release);
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun
/*
 * Remove & handle all waiters that have completed for the given syncpt
 *
 * Gathers completed waiters under the per-syncpt lock, re-arms or
 * disables the hardware interrupt, then runs the action handlers with
 * the lock dropped. Returns nonzero if the wait queue is now empty.
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	/* move completed waiters onto the per-action lists */
	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		/* re-arm for the next (lowest) remaining threshold */
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	/* handlers may sleep/wake waiters; must run outside the lock */
	run_handlers(completed);

	return empty;
}
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun /*
185*4882a593Smuzhiyun * Sync point threshold interrupt service thread function
186*4882a593Smuzhiyun * Handles sync point threshold triggers, in thread context
187*4882a593Smuzhiyun */
188*4882a593Smuzhiyun
syncpt_thresh_work(struct work_struct * work)189*4882a593Smuzhiyun static void syncpt_thresh_work(struct work_struct *work)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun struct host1x_syncpt_intr *syncpt_intr =
192*4882a593Smuzhiyun container_of(work, struct host1x_syncpt_intr, work);
193*4882a593Smuzhiyun struct host1x_syncpt *syncpt =
194*4882a593Smuzhiyun container_of(syncpt_intr, struct host1x_syncpt, intr);
195*4882a593Smuzhiyun unsigned int id = syncpt->id;
196*4882a593Smuzhiyun struct host1x *host = syncpt->host;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun (void)process_wait_list(host, syncpt,
199*4882a593Smuzhiyun host1x_syncpt_load(host->syncpt + id));
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
/*
 * Queue @waiter on @syncpt to fire @action with @data once the syncpt
 * value reaches @thresh. The caller allocates @waiter; ownership passes
 * to the interrupt machinery. If @ref is non-NULL an extra reference is
 * taken and *@ref is set so the caller can later cancel via
 * host1x_intr_put_ref(). Returns 0 on success, -EINVAL on NULL waiter.
 */
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
			   u32 thresh, enum host1x_intr_action action,
			   void *data, struct host1x_waitlist *waiter,
			   void **ref)
{
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		/* extra reference for the caller's cancellation handle */
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}
244*4882a593Smuzhiyun
/*
 * Cancel a waiter previously registered with host1x_intr_add_action()
 * (via its @ref handle) and drop the caller's reference to it.
 */
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	/*
	 * PENDING->CANCELLED; if the completion path has the waiter in
	 * flight (WLS_REMOVED), yield until it reaches a stable state.
	 */
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	/* flush the queue so a cancelled waiter is collected promptly */
	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	kref_put(&waiter->refcount, waiter_release);
}
260*4882a593Smuzhiyun
host1x_intr_init(struct host1x * host,unsigned int irq_sync)261*4882a593Smuzhiyun int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
262*4882a593Smuzhiyun {
263*4882a593Smuzhiyun unsigned int id;
264*4882a593Smuzhiyun u32 nb_pts = host1x_syncpt_nb_pts(host);
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun mutex_init(&host->intr_mutex);
267*4882a593Smuzhiyun host->intr_syncpt_irq = irq_sync;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun for (id = 0; id < nb_pts; ++id) {
270*4882a593Smuzhiyun struct host1x_syncpt *syncpt = host->syncpt + id;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun spin_lock_init(&syncpt->intr.lock);
273*4882a593Smuzhiyun INIT_LIST_HEAD(&syncpt->intr.wait_head);
274*4882a593Smuzhiyun snprintf(syncpt->intr.thresh_irq_name,
275*4882a593Smuzhiyun sizeof(syncpt->intr.thresh_irq_name),
276*4882a593Smuzhiyun "host1x_sp_%02u", id);
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun host1x_intr_start(host);
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun return 0;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
/* Tear down interrupt handling; counterpart to host1x_intr_init(). */
void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}
288*4882a593Smuzhiyun
host1x_intr_start(struct host1x * host)289*4882a593Smuzhiyun void host1x_intr_start(struct host1x *host)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun u32 hz = clk_get_rate(host->clk);
292*4882a593Smuzhiyun int err;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun mutex_lock(&host->intr_mutex);
295*4882a593Smuzhiyun err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
296*4882a593Smuzhiyun syncpt_thresh_work);
297*4882a593Smuzhiyun if (err) {
298*4882a593Smuzhiyun mutex_unlock(&host->intr_mutex);
299*4882a593Smuzhiyun return;
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun mutex_unlock(&host->intr_mutex);
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
/*
 * Disable all syncpt interrupts and reap every cancelled waiter. If any
 * non-cancelled waiter is still queued, emit a warning and bail out
 * without freeing the syncpt IRQ (callers may retry later).
 */
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			/* CANCELLED->HANDLED: safe to unlink and release */
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		/* anything left was not cancelled: refuse to stop */
		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}
339