/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *
 */

#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be NULL when the engine is owned by the host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	struct intel_gvt_sched_policy_ops *sched_ops;
};

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};
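
/*
 * Decoding sketch (illustrative only; "ctx_reg" is a stand-in for the
 * guest's INDIRECT_CTX ring-context register value): the register packs
 * a cacheline-aligned graphics memory address in the upper bits and the
 * context size, in cachelines, in the low six bits, so the two fields
 * split as:
 *
 *	guest_gma = ctx_reg & INDIRECT_CTX_ADDR_MASK;
 *	size      = (ctx_reg & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES;
 */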
#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};

struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;
	struct i915_request *req;
	/* has this workload been dispatched to i915? */
	bool dispatched;
	bool shadow; /* true once the guest request has been shadowed */
	int status;

	struct intel_vgpu_mm *shadow_mm;
	struct list_head lri_shadow_mm; /* for PPGTT load commands */

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};

struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
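
/*
 * Queueing sketch (hypothetical caller, not part of this API): a new
 * workload is linked onto the per-engine list that workload_q_head()
 * resolves to, then the per-engine scheduler thread is kicked through
 * the matching waitq, roughly:
 *
 *	list_add_tail(&workload->list, workload_q_head(vgpu, engine));
 *	wake_up(&vgpu->gvt->scheduler.waitq[engine->id]);
 */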

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);
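
/*
 * Lifecycle sketch (illustrative; assumes the execlist submission path,
 * with "my_prepare"/"my_complete" as hypothetical callbacks and error
 * handling elided):
 *
 *	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	workload->prepare = my_prepare;
 *	workload->complete = my_complete;
 *	intel_vgpu_queue_workload(workload);
 *
 * The scheduler thread later dispatches the workload to i915 and, once
 * it completes, releases it with intel_vgpu_destroy_workload().
 */

#endif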