/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun * 22*4882a593Smuzhiyun */ 23*4882a593Smuzhiyun #ifndef __AMDGPU_JOB_H__ 24*4882a593Smuzhiyun #define __AMDGPU_JOB_H__ 25*4882a593Smuzhiyun 26*4882a593Smuzhiyun /* bit set means command submit involves a preamble IB */ 27*4882a593Smuzhiyun #define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) 28*4882a593Smuzhiyun /* bit set means preamble IB is first presented in belonging context */ 29*4882a593Smuzhiyun #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) 30*4882a593Smuzhiyun /* bit set means context switch occured */ 31*4882a593Smuzhiyun #define AMDGPU_HAVE_CTX_SWITCH (1 << 2) 32*4882a593Smuzhiyun /* bit set means IB is preempted */ 33*4882a593Smuzhiyun #define AMDGPU_IB_PREEMPTED (1 << 3) 34*4882a593Smuzhiyun 35*4882a593Smuzhiyun #define to_amdgpu_job(sched_job) \ 36*4882a593Smuzhiyun container_of((sched_job), struct amdgpu_job, base) 37*4882a593Smuzhiyun 38*4882a593Smuzhiyun #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) 39*4882a593Smuzhiyun 40*4882a593Smuzhiyun struct amdgpu_fence; 41*4882a593Smuzhiyun enum amdgpu_ib_pool_type; 42*4882a593Smuzhiyun 43*4882a593Smuzhiyun struct amdgpu_job { 44*4882a593Smuzhiyun struct drm_sched_job base; 45*4882a593Smuzhiyun struct amdgpu_vm *vm; 46*4882a593Smuzhiyun struct amdgpu_sync sync; 47*4882a593Smuzhiyun struct amdgpu_sync sched_sync; 48*4882a593Smuzhiyun struct amdgpu_ib *ibs; 49*4882a593Smuzhiyun struct dma_fence *fence; /* the hw fence */ 50*4882a593Smuzhiyun uint32_t preamble_status; 51*4882a593Smuzhiyun uint32_t preemption_status; 52*4882a593Smuzhiyun uint32_t num_ibs; 53*4882a593Smuzhiyun bool vm_needs_flush; 54*4882a593Smuzhiyun uint64_t vm_pd_addr; 55*4882a593Smuzhiyun unsigned vmid; 56*4882a593Smuzhiyun unsigned pasid; 57*4882a593Smuzhiyun uint32_t gds_base, gds_size; 58*4882a593Smuzhiyun uint32_t gws_base, gws_size; 59*4882a593Smuzhiyun uint32_t oa_base, oa_size; 60*4882a593Smuzhiyun uint32_t vram_lost_counter; 61*4882a593Smuzhiyun 62*4882a593Smuzhiyun /* user fence handling */ 
63*4882a593Smuzhiyun uint64_t uf_addr; 64*4882a593Smuzhiyun uint64_t uf_sequence; 65*4882a593Smuzhiyun }; 66*4882a593Smuzhiyun 67*4882a593Smuzhiyun int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 68*4882a593Smuzhiyun struct amdgpu_job **job, struct amdgpu_vm *vm); 69*4882a593Smuzhiyun int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 70*4882a593Smuzhiyun enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); 71*4882a593Smuzhiyun void amdgpu_job_free_resources(struct amdgpu_job *job); 72*4882a593Smuzhiyun void amdgpu_job_free(struct amdgpu_job *job); 73*4882a593Smuzhiyun int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, 74*4882a593Smuzhiyun void *owner, struct dma_fence **f); 75*4882a593Smuzhiyun int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, 76*4882a593Smuzhiyun struct dma_fence **fence); 77*4882a593Smuzhiyun 78*4882a593Smuzhiyun void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched); 79*4882a593Smuzhiyun 80*4882a593Smuzhiyun #endif 81