/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!list_empty(workload_q_head(vgpu, engine)))
			return true;
	}

	return false;
}

/* We give a vGPU 2 seconds of higher priority during start */
#define GVT_SCHED_VGPU_PRI_TIME  2

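/* Per-vGPU scheduling state, kept in vgpu->sched_data.  A brief summary
 * of how the fields below are used by this policy:
 *  - lru_list:		links the vGPU into the scheduler's LRU run queue
 *  - active:		true while the vGPU is on the run queue
 *  - pri_sched/pri_time:	priority window granted right after start
 *  - sched_in_time:	timestamp of the last schedule-in
 *  - sched_time:	accumulated time the vGPU has been scheduled
 *  - left_ts/allocated_ts:	remaining and per-period allocated timeslice
 *  - sched_ctl:	weight used when (re)distributing timeslices
 */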
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;
	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

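/* Global state of the time-based scheduler (one per GVT instance): the
 * hrtimer fires every @period ns to request a scheduling pass,
 * @lru_runq_head holds the active vGPUs in LRU order, and @expire_time
 * marks when the next timeslice rebalance is due.
 */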
struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};

static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

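/* How the fair timeslice is computed (illustrative numbers only):
 * the balance runs at most once per GVT_TS_BALANCE_PERIOD_MS (100 ms),
 * and every GVT_TS_BALANCE_STAGE_NUM (10) runs a stage-0 reset happens.
 * At stage 0 each vGPU on the run queue gets
 *
 *	allocated_ts = (100 ms / total_weight) * vgpu_weight
 *
 * e.g. with two vGPUs of weight 2 and 6, total_weight is 8, so they
 * receive roughly 25 ms and 75 ms of each 100 ms balance period.  In
 * the other nine stages the same allocation is added on top of whatever
 * timeslice (or debt) is left over; only stage 0 discards the carry.
 */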
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where it is
	 * allocated again without adding the previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left-over (or debt) slice of the previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu;
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * After the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu.
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->gt, i) {
		if (scheduler->current_workload[engine->id])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->gt, i)
		wake_up(&scheduler->waitq[engine->id]);
}

static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			} else
				vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* default time slice, in nanoseconds (1 ms) */
#define GVT_DEFAULT_TIME_SLICE 1000000

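/* One pass of the time-based scheduler (TBS): pick the next vGPU from
 * the LRU run queue that has pending workloads and timeslice left (or
 * one still inside its start-up priority window), fall back to the
 * idle vGPU when nothing is runnable, then hand the choice to
 * try_to_schedule_next_vgpu().
 */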
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

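/* Entry point of the scheduler, normally driven by the GVT service
 * thread after tbs_timer_fn() raises INTEL_GVT_REQUEST_SCHED: account
 * the current vGPU's consumed timeslice, rebalance the per-vGPU
 * allocations at most once per GVT_TS_BALANCE_PERIOD_MS, and run one
 * scheduling pass, all under gvt->sched_lock.
 */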
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}

static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has been removed; stop the timer if no vGPU is left */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}

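/* Put a vGPU onto the run queue.  A newly started vGPU is granted a
 * GVT_SCHED_VGPU_PRI_TIME-second priority window (see find_busy_vgpu())
 * so it is preferred right after start, and the scheduling timer is
 * kicked off if it is not already running.
 */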
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU
 * data: sched_data and sched_ctl.  We treat them as part of the global
 * scheduler, so they are protected by gvt->sched_lock.  Callers should
 * decide for themselves whether vgpu_lock needs to be held outside.
 */

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}

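/* Take a vGPU off the scheduler: drop it from the run queue, clear it
 * as the current/next vGPU so no further workloads are dispatched, and
 * switch any engines whose MMIO context it still owns back to the host
 * under mmio_context_lock, with a runtime PM reference held.
 */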
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for_each_engine(engine, vgpu->gvt->gt, id) {
		if (scheduler->engine_owner[engine->id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, engine);
			scheduler->engine_owner[engine->id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
481