xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/gt/sysfs_engines.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

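/*
 * Expose per-engine identification and tunables under sysfs. A sketch of
 * the resulting layout (the card index and engine names below are
 * illustrative; they vary by platform):
 *
 *   /sys/class/drm/card0/engine/rcs0/
 *       name, class, instance, mmio_base
 *       capabilities, known_capabilities
 *       max_busywait_duration_ns, stop_timeout_ms, ...
 *       .defaults/  (read-only copies of the default tunable values)
 */
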
struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
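
/*
 * Illustrative reads of the identification attributes above (the card
 * index and engine name are assumptions, not fixed paths):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/name
 *   rcs0
 *   $ cat /sys/class/drm/card0/engine/rcs0/class
 *   0
 */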

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    u32 caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));

	len = 0;
	for_each_set_bit(n,
			 (unsigned long *)&caps,
			 show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
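
/*
 * Example output, assuming a hypothetical video engine with both of the
 * vcs_caps bits set: HEVC is bit 0 and SFC is bit 1, so __caps_show()
 * prints them in that order and both files read the same here:
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *   $ cat /sys/class/drm/card0/engine/vcs0/known_capabilities
 *   hevc sfc
 */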

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can setup the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep, by first spinning
	 * on the request -- if it completed in less time than it would take
	 * to go to sleep, process the interrupt and return to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_nsecs(2))
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
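
/*
 * Example tuning (path illustrative): writing 0 disables busywaiting,
 * trading a little wakeup latency for less CPU burn. max_spin_store()
 * rejects anything above two scheduler ticks (jiffies_to_nsecs(2)) with
 * -EINVAL:
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 */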

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
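
/*
 * Example (path illustrative): lengthen the timeslice for long-running
 * contexts. If timeslicing is currently active, timeslice_store() above
 * rearms the execlists timer immediately with the new duration:
 *
 *   $ echo 10 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */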

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
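
/*
 * Example (path illustrative): allow contexts up to 100ms to clear the
 * GPU after submission is disabled, before the reset proceeds:
 *
 *   $ echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */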

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
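
/*
 * Example (path illustrative): writing 0 cancels any armed preemption
 * timer via set_timer_ms() above; elsewhere in i915 a zero timeout is
 * conventionally treated as "no preempt-reset", though that policy is
 * not enforced in this file:
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */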

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
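
/*
 * Example (path illustrative): the value is the interval between pulses
 * in milliseconds; a value of 0 asks intel_engine_set_heartbeat() to
 * stop the heartbeat, which effectively disables hangcheck for the
 * engine (behaviour implemented outside this file):
 *
 *   $ echo 2500 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */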

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
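
/*
 * The ".defaults" directory added above mirrors the tunables with their
 * read-only default values, e.g. (path illustrative):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms
 */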

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
}