Lines Matching refs:vgpu

83 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; in sr_oa_regs()
125 struct intel_vgpu *vgpu = workload->vgpu; in populate_shadow_context() local
126 struct intel_gvt *gvt = vgpu->gvt; in populate_shadow_context()
134 struct intel_vgpu_submission *s = &vgpu->submission; in populate_shadow_context()
148 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
151 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ in populate_shadow_context()
168 intel_gvt_hypervisor_read_gpa(vgpu, in populate_shadow_context()
213 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in populate_shadow_context()
235 intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size); in populate_shadow_context()
249 static void save_ring_hw_state(struct intel_vgpu *vgpu, in save_ring_hw_state() argument
256 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
260 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
264 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = in save_ring_hw_state()
300 if (workload->vgpu != scheduler->engine_owner[ring_id]) { in shadow_context_status_change()
303 workload->vgpu, rq->engine); in shadow_context_status_change()
304 scheduler->engine_owner[ring_id] = workload->vgpu; in shadow_context_status_change()
307 ring_id, workload->vgpu->id); in shadow_context_status_change()
312 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
316 save_ring_hw_state(workload->vgpu, rq->engine); in shadow_context_status_change()
345 struct intel_vgpu *vgpu = workload->vgpu; in copy_workload_to_ring_buffer() local
352 intel_vgpu_restore_inhibit_context(vgpu, req); in copy_workload_to_ring_buffer()
441 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_workload_req_alloc() local
442 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_workload_req_alloc()
468 struct intel_vgpu *vgpu = workload->vgpu; in intel_gvt_scan_and_shadow_workload() local
469 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_scan_and_shadow_workload()
472 lockdep_assert_held(&vgpu->vgpu_lock); in intel_gvt_scan_and_shadow_workload()
504 struct intel_gvt *gvt = workload->vgpu->gvt; in prepare_shadow_batch_buffer()
606 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) = in update_vreg_in_ctx()
638 struct intel_vgpu *vgpu = workload->vgpu; in intel_vgpu_shadow_mm_pin() local
690 struct intel_vgpu *vgpu = workload->vgpu; in prepare_workload() local
691 struct intel_vgpu_submission *s = &vgpu->submission; in prepare_workload()
704 ret = intel_vgpu_sync_oos_pages(workload->vgpu); in prepare_workload()
710 ret = intel_vgpu_flush_post_shadow(workload->vgpu); in prepare_workload()
752 struct intel_vgpu *vgpu = workload->vgpu; in dispatch_workload() local
759 mutex_lock(&vgpu->vgpu_lock); in dispatch_workload()
794 mutex_unlock(&vgpu->vgpu_lock); in dispatch_workload()
850 atomic_inc(&workload->vgpu->submission.running_workload_num); in pick_next_workload()
856 static void update_guest_pdps(struct intel_vgpu *vgpu, in update_guest_pdps() argument
865 intel_gvt_hypervisor_write_gpa(vgpu, in update_guest_pdps()
890 struct intel_vgpu *vgpu = workload->vgpu; in update_guest_context() local
922 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; in update_guest_context()
923 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; in update_guest_context()
939 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in update_guest_context()
961 intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size); in update_guest_context()
967 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + in update_guest_context()
977 update_guest_pdps(vgpu, workload->ring_context_gpa, in update_guest_context()
982 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \ in update_guest_context()
990 intel_gvt_hypervisor_write_gpa(vgpu, in update_guest_context()
998 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, in intel_vgpu_clean_workloads() argument
1001 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_clean_workloads()
1002 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in intel_vgpu_clean_workloads()
1023 struct intel_vgpu *vgpu = workload->vgpu; in complete_current_workload() local
1024 struct intel_vgpu_submission *s = &vgpu->submission; in complete_current_workload()
1028 mutex_lock(&vgpu->vgpu_lock); in complete_current_workload()
1052 !(vgpu->resetting_eng & BIT(ring_id))) { in complete_current_workload()
1057 intel_vgpu_trigger_virtual_event(vgpu, event); in complete_current_workload()
1070 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { in complete_current_workload()
1084 intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); in complete_current_workload()
1099 mutex_unlock(&vgpu->vgpu_lock); in complete_current_workload()
1109 struct intel_vgpu *vgpu = NULL; in workload_thread() local
1133 workload->vgpu->id); in workload_thread()
1154 vgpu = workload->vgpu; in workload_thread()
1175 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); in workload_thread()
1180 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) in intel_gvt_wait_vgpu_idle() argument
1182 struct intel_vgpu_submission *s = &vgpu->submission; in intel_gvt_wait_vgpu_idle()
1183 struct intel_gvt *gvt = vgpu->gvt; in intel_gvt_wait_vgpu_idle()
1270 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) in intel_vgpu_clean_submission() argument
1272 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_clean_submission()
1276 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); in intel_vgpu_clean_submission()
1279 for_each_engine(engine, vgpu->gvt->gt, id) in intel_vgpu_clean_submission()
1294 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, in intel_vgpu_reset_submission() argument
1297 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_reset_submission()
1302 intel_vgpu_clean_workloads(vgpu, engine_mask); in intel_vgpu_reset_submission()
1303 s->ops->reset(vgpu, engine_mask); in intel_vgpu_reset_submission()
1334 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) in intel_vgpu_setup_submission() argument
1336 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_setup_submission()
1337 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_setup_submission()
1349 for_each_engine(engine, vgpu->gvt->gt, i) { in intel_vgpu_setup_submission()
1399 for_each_engine(engine, vgpu->gvt->gt, i) { in intel_vgpu_setup_submission()
1421 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, in intel_vgpu_select_submission_ops() argument
1425 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_vgpu_select_submission_ops()
1426 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_select_submission_ops()
1441 s->ops->clean(vgpu, engine_mask); in intel_vgpu_select_submission_ops()
1447 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id); in intel_vgpu_select_submission_ops()
1451 ret = ops[interface]->init(vgpu, engine_mask); in intel_vgpu_select_submission_ops()
1460 vgpu->id, s->ops->name); in intel_vgpu_select_submission_ops()
1474 struct intel_vgpu_submission *s = &workload->vgpu->submission; in intel_vgpu_destroy_workload()
1497 alloc_workload(struct intel_vgpu *vgpu) in alloc_workload() argument
1499 struct intel_vgpu_submission *s = &vgpu->submission; in alloc_workload()
1514 workload->vgpu = vgpu; in alloc_workload()
1522 static void read_guest_pdps(struct intel_vgpu *vgpu, in read_guest_pdps() argument
1531 intel_gvt_hypervisor_read_gpa(vgpu, in read_guest_pdps()
1539 struct intel_vgpu *vgpu = workload->vgpu; in prepare_mm() local
1555 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps); in prepare_mm()
1557 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps); in prepare_mm()
1582 intel_vgpu_create_workload(struct intel_vgpu *vgpu, in intel_vgpu_create_workload() argument
1586 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_create_workload()
1587 struct list_head *q = workload_q_head(vgpu, engine); in intel_vgpu_create_workload()
1595 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, in intel_vgpu_create_workload()
1602 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1605 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1632 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1634 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1636 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1639 if (!intel_gvt_ggtt_validate_range(vgpu, start, in intel_vgpu_create_workload()
1645 workload = alloc_workload(vgpu); in intel_vgpu_create_workload()
1659 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1661 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + in intel_vgpu_create_workload()
1671 if (!intel_gvt_ggtt_validate_range(vgpu, in intel_vgpu_create_workload()
1685 if (!intel_gvt_ggtt_validate_range(vgpu, in intel_vgpu_create_workload()
1717 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); in intel_vgpu_create_workload()
1738 workload_q_head(workload->vgpu, workload->engine)); in intel_vgpu_queue_workload()
1739 intel_gvt_kick_schedule(workload->vgpu->gvt); in intel_vgpu_queue_workload()
1740 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]); in intel_vgpu_queue_workload()
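Taken together, these references repeat one access pattern: each call site first derives the owning vGPU from the workload, then walks vgpu->gvt and vgpu->submission for the state it needs, taking vgpu->vgpu_lock where the scheduler mutates per-vGPU state (see the dispatch_workload and complete_current_workload references above). The fragment below is a minimal illustrative sketch of that pattern, not code from scheduler.c; the helper name is hypothetical, and it assumes only the struct members visible in the references above (workload->vgpu, vgpu->gvt->gt->i915, vgpu->submission, vgpu->vgpu_lock, vgpu->id).

	/*
	 * Illustrative sketch only; hypothetical helper, not part of scheduler.c.
	 * Shows the derivation pattern the call sites above have in common.
	 */
	static void example_touch_vgpu_state(struct intel_vgpu_workload *workload)
	{
		struct intel_vgpu *vgpu = workload->vgpu;             /* owning vGPU */
		struct intel_vgpu_submission *s = &vgpu->submission;  /* per-vGPU submission state */
		struct drm_i915_private *i915 = vgpu->gvt->gt->i915;  /* host device */

		mutex_lock(&vgpu->vgpu_lock);   /* serialize per-vGPU state, as dispatch/complete paths do */
		/* ... operate on s, i915, vgpu->id ... */
		mutex_unlock(&vgpu->vgpu_lock);
	}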