// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
	struct host1x_syncpt_base *bases = host->bases;
	unsigned int i;

	for (i = 0; i < host->info->nb_bases; i++)
		if (!bases[i].requested)
			break;

	if (i >= host->info->nb_bases)
		return NULL;

	bases[i].requested = true;
	return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
	if (base)
		base->requested = false;
}

static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
						 struct host1x_client *client,
						 unsigned long flags)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;
	char *name;

	mutex_lock(&host->syncpt_mutex);

	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
		;

	if (i >= host->info->nb_pts)
		goto unlock;

	if (flags & HOST1X_SYNCPT_HAS_BASE) {
		sp->base = host1x_syncpt_base_request(host);
		if (!sp->base)
			goto unlock;
	}

	name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
			 client ? dev_name(client->dev) : NULL);
	if (!name)
		goto free_base;

	sp->client = client;
	sp->name = name;

	if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
		sp->client_managed = true;
	else
		sp->client_managed = false;

	mutex_unlock(&host->syncpt_mutex);
	return sp;

free_base:
	host1x_syncpt_base_free(sp->base);
	sp->base = NULL;
unlock:
	mutex_unlock(&host->syncpt_mutex);
	return NULL;
}

/**
 * host1x_syncpt_id() - retrieve syncpoint ID
 * @sp: host1x syncpoint
 *
 * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
 * often used as a value to program into registers that control how hardware
 * blocks interact with syncpoints.
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);
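
/*
 * Illustrative sketch (not part of this driver): a client might program
 * the ID into a device-specific register so its hardware block knows which
 * syncpoint to operate on. CLIENT_SYNCPT_REG and client_regs are assumed
 * names, not real definitions:
 *
 *	u32 id = host1x_syncpt_id(sp);
 *
 *	writel(id, client_regs + CLIENT_SYNCPT_REG);
 */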

/**
 * host1x_syncpt_incr_max() - update the value sent to hardware
 * @sp: host1x syncpoint
 * @incrs: number of increments
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);
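
/*
 * Illustrative sketch: a submission path typically reserves its fence
 * value up front, then waits on it once the work is queued. The names
 * below are assumptions, not part of this file:
 *
 *	u32 fence = host1x_syncpt_incr_max(sp, 1);
 *
 *	... queue work that increments the syncpoint once ...
 *
 *	err = host1x_syncpt_wait(sp, fence, timeout, &value);
 */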

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
		host1x_hw_syncpt_restore(host, sp_base + i);

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	unsigned int i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}
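
/*
 * Illustrative sketch: host1x_syncpt_save() and host1x_syncpt_restore()
 * are meant to bracket a power-down of the host1x block, roughly as in
 * these assumed suspend/resume paths:
 *
 *	host1x_syncpt_save(host);	// before cutting power
 *	...
 *	host1x_syncpt_restore(host);	// after power comes back
 */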

/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;

	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base value.
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_load_wait_base(sp->host, sp);

	return sp->base_val;
}

/**
 * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
 * @sp: host1x syncpoint
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);
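
/*
 * Illustrative sketch: signalling a fence from the CPU instead of from a
 * channel, assuming the increment was reserved beforehand:
 *
 *	u32 fence = host1x_syncpt_incr_max(sp, 1);
 *	int err = host1x_syncpt_incr(sp);
 */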

/*
 * Update the sync point from hardware, and return true if the syncpoint is
 * expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	host1x_hw_syncpt_load(sp->host, sp);

	return host1x_syncpt_is_expired(sp, thresh);
}

/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value
 * @value: return location for the syncpoint value
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);

		return 0;
	}

	/* try to read from register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;

		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain;

		remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);

			err = 0;

			break;
		}

		if (remain < 0) {
			err = remain;
			break;
		}

		timeout -= check;

		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);

			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);

			check_count++;
		}
	}

	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
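
/*
 * Illustrative sketch: waiting up to 100 ms for a previously reserved
 * fence value; msecs_to_jiffies() converts to the jiffies-based timeout
 * this function expects:
 *
 *	u32 value;
 *	int err = host1x_syncpt_wait(sp, fence, msecs_to_jiffies(100),
 *				     &value);
 *	if (err == -EAGAIN)
 *		... timed out, or timeout was 0 and the fence is pending ...
 */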

/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;

	smp_rmb();

	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val = the current value of the syncpoint.
	 * t = thresh = the value we are checking
	 * f = future_val = max_val = the value c will reach when all
	 *     outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired (Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dtc
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases:
	 *
	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	A) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED (return true)
	 *	Dtf <  Dtc implies WAIT (return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If the future value is zero, we have a client-managed sync point. In
	 * that case we do a direct comparison.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
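
/*
 * Worked example of the mod-2^32 comparison above (values assumed): with
 * c = 0xfffffffe, t = 0x00000001 and f = 0x00000003, Dtf = f - t = 2 and
 * Dtc = c - t = 0xfffffffd, so Dtf < Dtc and the caller must wait, even
 * though c > t numerically: the counter simply has not wrapped past the
 * threshold yet.
 */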

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt_base *bases;
	struct host1x_syncpt *syncpt;
	unsigned int i;

	syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
			     GFP_KERNEL);
	if (!bases)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; i++) {
		syncpt[i].id = i;
		syncpt[i].host = host;

		/*
		 * Unassign syncpt from channels for purposes of Tegra186
		 * syncpoint protection. This prevents any channel from
		 * accessing it until it is reassigned.
		 */
		host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
	}

	for (i = 0; i < host->info->nb_bases; i++)
		bases[i].id = i;

	mutex_init(&host->syncpt_mutex);
	host->syncpt = syncpt;
	host->bases = bases;

	host1x_syncpt_restore(host);
	host1x_hw_syncpt_enable_protection(host);

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
	if (!host->nop_sp)
		return -ENOMEM;

	return 0;
}

/**
 * host1x_syncpt_request() - request a syncpoint
 * @client: client requesting the syncpoint
 * @flags: flags
 *
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
 * client driver needs to release it using host1x_syncpt_free().
 */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags)
{
	struct host1x *host = dev_get_drvdata(client->host->parent);

	return host1x_syncpt_alloc(host, client, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
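
/*
 * Illustrative sketch: a client driver requesting a syncpoint with a wait
 * base and releasing it on teardown (error handling abbreviated):
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_HAS_BASE);
 *	if (!sp)
 *		return -ENOMEM;
 *	...
 *	host1x_syncpt_free(sp);
 */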

/**
 * host1x_syncpt_free() - free a requested syncpoint
 * @sp: host1x syncpoint
 *
 * Release a syncpoint previously allocated using host1x_syncpt_request(). A
 * host1x client driver should call this when the syncpoint is no longer in
 * use. Note that client drivers must ensure that the syncpoint doesn't remain
 * under the control of hardware after calling this function, otherwise two
 * clients may end up trying to access the same syncpoint concurrently.
 */
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	mutex_lock(&sp->host->syncpt_mutex);

	host1x_syncpt_base_free(sp->base);
	kfree(sp->name);
	sp->base = NULL;
	sp->client = NULL;
	sp->name = NULL;
	sp->client_managed = false;

	mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);

void host1x_syncpt_deinit(struct host1x *host)
{
	struct host1x_syncpt *sp = host->syncpt;
	unsigned int i;

	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

/**
 * host1x_syncpt_read_max() - read maximum syncpoint value
 * @sp: host1x syncpoint
 *
 * The maximum syncpoint value indicates how many operations there are in
 * queue, either in channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
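
/*
 * Illustrative sketch: the number of increments still outstanding can be
 * derived from the two cached values; the subtraction is mod 2^32, so it
 * stays correct across wraparound:
 *
 *	u32 outstanding = host1x_syncpt_read_max(sp) -
 *			  host1x_syncpt_read_min(sp);
 */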

/**
 * host1x_syncpt_read_min() - read minimum syncpoint value
 * @sp: host1x syncpoint
 *
 * The minimum syncpoint value is a shadow of the current sync point value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
	smp_rmb();

	return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

/**
 * host1x_syncpt_read() - read the current syncpoint value
 * @sp: host1x syncpoint
 */
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
	return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

/**
 * host1x_syncpt_get() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
	if (id >= host->info->nb_pts)
		return NULL;

	return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);
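
/*
 * Illustrative sketch: mapping an ID (for example one read back from a
 * register) to its syncpoint, with the out-of-range case handled:
 *
 *	struct host1x_syncpt *sp = host1x_syncpt_get(host, id);
 *
 *	if (!sp)
 *		return -EINVAL;
 */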

/**
 * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
 * @sp: host1x syncpoint
 */
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
	return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

/**
 * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
 * @base: host1x syncpoint wait base
 */
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
	return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);
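
/*
 * Illustrative sketch: a wait base may be absent, so check the result of
 * host1x_syncpt_get_base() before asking for its ID:
 *
 *	struct host1x_syncpt_base *base = host1x_syncpt_get_base(sp);
 *
 *	if (base)
 *		id = host1x_syncpt_base_id(base);
 */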