/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

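/* Look up the BO backing the user fence chunk, check that it is a single
 * page and not a userptr BO, and record the write offset that later becomes
 * the job's user fence address.
 */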
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_bo_list_in *data)
{
	int r;
	struct drm_amdgpu_bo_list_entry *info = NULL;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	if (info)
		kvfree(info);

	return r;
}

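/* Copy the chunk array and each chunk's payload in from user space, validate
 * the chunk IDs and allocate the job that will carry the IBs.
 */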
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	mutex_lock(&p->ctx->lock);

	/* skip guilty context job */
	if (atomic_read(&p->ctx->guilty) == 1) {
		ret = -ECANCELED;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kfree(chunk_array);

	return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = amdgpu_vram_mgr_usage(vram_man);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(vram_man);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

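/* Validate a single BO, preferring its preferred domains only while the
 * per-submission move budget computed above has not been used up.
 */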
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
				 struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv,
		.flags = 0
	};
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_cs_parser *p = param;
	int r;

	r = amdgpu_cs_bo_validate(p, bo);
	if (r)
		return r;

	if (bo->shadow)
		r = amdgpu_cs_bo_validate(p, bo->shadow);

	return r;
}

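/* Validate every BO on the given list; userptr BOs whose backing pages
 * changed are re-validated in the CPU domain first so the updated pages are
 * picked up.
 */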
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

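/* Build the list of BOs used by this submission, reserve them, pin down the
 * userptr pages and validate everything against the move thresholds.
 */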
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	struct amdgpu_bo *gds;
	struct amdgpu_bo *gws;
	struct amdgpu_bo *oa;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("calloc failure\n");
			return -ENOMEM;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			return r;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out;
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	gds = p->bo_list->gds_obj;
	gws = p->bo_list->gws_obj;
	oa = p->bo_list->oa_obj;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		/* Make sure we use the exclusive slot for shared BOs */
		if (bo->prime_shared_count)
			e->tv.num_shared = 0;
		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	if (gds) {
		p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
		p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
	}
	if (gws) {
		p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
		p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
	}
	if (oa) {
		p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
		p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
	}

	if (!r && p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

error_validate:
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
	return r;
}

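/* Make the job wait for the fences already attached to every validated BO,
 * honouring per-BO explicit-sync settings.
 */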
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: whether to back off the buffer reservations
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
				  bool backoff)
{
	unsigned i;

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx) {
		mutex_unlock(&parser->ctx->lock);
		amdgpu_ctx_put(parser->ctx);
	}
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

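/* Handle the VM related work for the submission: parse or patch IBs for
 * rings that need VM emulation, update the page tables for all BOs involved
 * and add the resulting page-table fences to the job's sync object.
 */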
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
		unsigned i, j;

		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
			struct drm_amdgpu_cs_chunk_ib *chunk_ib;
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			struct amdgpu_cs_chunk *chunk;
			uint64_t offset, va_start;
			struct amdgpu_ib *ib;
			uint8_t *kptr;

			chunk = &p->chunks[i];
			ib = &p->job->ibs[j];
			chunk_ib = chunk->kdata;

			if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
				continue;

			va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
			r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
			if (r) {
				DRM_ERROR("IB va_start is invalid\n");
				return r;
			}

			if ((va_start + chunk_ib->ib_bytes) >
			    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
			kptr += va_start - offset;

			if (ring->funcs->parse_cs) {
				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
				amdgpu_bo_kunmap(aobj);

				r = amdgpu_ring_parse_cs(ring, p, j);
				if (r)
					return r;
			} else {
				ib->ptr = (uint32_t *)kptr;
				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
				amdgpu_bo_kunmap(aobj);
				if (r)
					return r;
			}

			j++;
		}
	}

	if (!p->job->vm)
		return amdgpu_cs_sync_rings(p);

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
	if (r)
		return r;

	p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return amdgpu_cs_sync_rings(p);
}

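/* Walk the IB chunks, pick the scheduler entity they target and fill in the
 * job's IBs. All IBs of one submission must go to the same entity.
 */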
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int r, ce_preempt = 0, de_preempt = 0;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct drm_sched_entity *entity;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
					ce_preempt++;
				else
					de_preempt++;
			}

			/* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
			if (ce_preempt > 1 || de_preempt > 1)
				return -EINVAL;
		}

		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
					  chunk_ib->ip_instance, chunk_ib->ring,
					  &entity);
		if (r)
			return r;

		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
			parser->job->preamble_status |=
				AMDGPU_PREAMBLE_IB_PRESENT;

		if (parser->entity && parser->entity != entity)
			return -EINVAL;

		/* Return if there is no run queue associated with this entity.
		 * Possibly because of a disabled HW IP.
		 */
		if (entity->rq == NULL)
			return -EINVAL;

		parser->entity = entity;

		ring = to_amdgpu_ring(entity->rq->sched);
		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
				  chunk_ib->ib_bytes : 0,
				  AMDGPU_IB_POOL_DELAYED, ib);
		if (r) {
			DRM_ERROR("Failed to get ib !\n");
			return r;
		}

		ib->gpu_addr = chunk_ib->va_start;
		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;

		j++;
	}

	/* MM engine doesn't support user fences */
	ring = to_amdgpu_ring(parser->entity->rq->sched);
	if (parser->job->uf_addr && ring->funcs->no_user_fence)
		return -EINVAL;

	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

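/* Add fence dependencies from a DEPENDENCIES or SCHEDULED_DEPENDENCIES
 * chunk; for the latter, wait only for the dependency to be scheduled, not
 * for it to finish.
 */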
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
				       struct amdgpu_cs_chunk *chunk)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;
	struct drm_amdgpu_cs_chunk_dep *deps;

	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->job->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
						 uint32_t handle, u64 point,
						 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->job->sync, fence);
	dma_fence_put(fence);

	return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
					    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i, r;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
							  0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
						     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i, r;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add_to_sync(p,
							  syncobj_deps[i].handle,
							  syncobj_deps[i].point,
							  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
					     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps;
	unsigned num_deps;
	int i;

	deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;
	int i;

	syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			kfree(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

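/* Process all dependency chunks of the submission and record the syncobjs
 * that have to be signalled once the job's fence exists.
 */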
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

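/* Attach the submission's fence to every syncobj the user asked to signal,
 * either directly or as a new timeline point.
 */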
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

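/* Hand the prepared job over to the GPU scheduler. The notifier lock is held
 * so that userptr invalidations racing with the submission are caught and
 * the ioctl is restarted with -EAGAIN.
 */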
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_sched_entity *entity = p->entity;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
        int r;

        job = p->job;
        p->job = NULL;

        r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
        if (r)
                goto error_unlock;

        /* No memory allocation is allowed while the notifier lock is held.
         * The lock is held until amdgpu_cs_submit() is finished and the
         * fence is added to the BOs.
         */
        mutex_lock(&p->adev->notifier_lock);

        /* If the userptr BOs were invalidated after amdgpu_cs_parser_bos(),
         * return -EAGAIN; drmIoctl() in libdrm will restart the amdgpu_cs_ioctl.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
        }
        if (r) {
                r = -EAGAIN;
                goto error_abort;
        }

        p->fence = dma_fence_get(&job->base.s_fence->finished);

        amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
        amdgpu_cs_post_dependencies(p);

        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        job->uf_sequence = seq;

        amdgpu_job_free_resources(job);

        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        drm_sched_entity_push_job(&job->base, entity);

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        mutex_unlock(&p->adev->notifier_lock);

        return 0;

error_abort:
        drm_sched_job_cleanup(&job->base);
        mutex_unlock(&p->adev->notifier_lock);

error_unlock:
        amdgpu_job_free(job);
        return r;
}

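/* Emit one amdgpu_cs trace event per IB when the tracepoint is enabled. */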
static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
{
        int i;

        if (!trace_amdgpu_cs_enabled())
                return;

        for (i = 0; i < parser->job->num_ibs; i++)
                trace_amdgpu_cs(parser, i);
}

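/**
 * amdgpu_cs_ioctl - process a command submission from userspace
 *
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Parse the chunks, fill the IBs, resolve the dependencies, validate the
 * buffer list, handle the VM updates and finally submit the resulting job
 * to the scheduler.
 */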
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
        int r;

        if (amdgpu_ras_intr_triggered())
                return -EHWPOISON;

        if (!adev->accel_working)
                return -EBUSY;

        parser.adev = adev;
        parser.filp = filp;

        r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
                if (printk_ratelimit())
                        DRM_ERROR("Failed to initialize parser %d!\n", r);
                goto out;
        }

        r = amdgpu_cs_ib_fill(adev, &parser);
        if (r)
                goto out;

        r = amdgpu_cs_dependencies(adev, &parser);
        if (r) {
                DRM_ERROR("Failed in the dependencies handling %d!\n", r);
                goto out;
        }

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto out;
        }

        reserved_buffers = true;

        trace_amdgpu_cs_ibs(&parser);

        r = amdgpu_cs_vm_handling(&parser);
        if (r)
                goto out;

        r = amdgpu_cs_submit(&parser, cs);

out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

        return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;

        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
                                  wait->in.ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
        if (IS_ERR(fence))
                r = PTR_ERR(fence);
        else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
                if (r > 0 && fence->error)
                        r = fence->error;
                dma_fence_put(fence);
        } else
                r = 1;

        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);

        return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

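/**
 * amdgpu_cs_fence_to_handle_ioctl - convert a CS fence into a handle
 *
 * @dev: drm device
 * @data: union drm_amdgpu_fence_to_handle from userspace
 * @filp: file private
 *
 * Look up the fence of a previous submission and export it as a syncobj
 * handle, a syncobj file descriptor or a sync_file file descriptor,
 * depending on what userspace asked for.
 */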
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;
        int fd, r;

        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        if (!fence)
                fence = dma_fence_get_stub();

        switch (info->in.what) {
        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
                fd = get_unused_fd_flags(O_CLOEXEC);
                if (fd < 0) {
                        dma_fence_put(fence);
                        return fd;
                }

                sync_file = sync_file_create(fence);
                dma_fence_put(fence);
                if (!sync_file) {
                        put_unused_fd(fd);
                        return -ENOMEM;
                }

                fd_install(fd, sync_file->file);
                info->out.handle = fd;
                return 0;

        default:
                dma_fence_put(fence);
                return -EINVAL;
        }
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     union drm_amdgpu_wait_fences *wait,
                                     struct drm_amdgpu_fence *fences)
{
        uint32_t fence_count = wait->in.fence_count;
        unsigned int i;
        long r = 1;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;
                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                r = dma_fence_wait_timeout(fence, true, timeout);
                /* Check the fence error before dropping our reference */
                if (r > 0 && fence->error)
                        r = fence->error;

                dma_fence_put(fence);
                if (r < 0)
                        return r;

                if (r == 0)
                        break;
        }

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);

        return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
                                    struct drm_file *filp,
                                    union drm_amdgpu_wait_fences *wait,
                                    struct drm_amdgpu_fence *fences)
{
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
        uint32_t fence_count = wait->in.fence_count;
        uint32_t first = ~0;
        struct dma_fence **array;
        unsigned int i;
        long r;

        /* Prepare the fence array */
        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

        if (array == NULL)
                return -ENOMEM;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        goto err_free_fence_array;
                } else if (fence) {
                        array[i] = fence;
                } else { /* NULL, the fence has been already signaled */
                        r = 1;
                        first = i;
                        goto out;
                }
        }

        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
                                       &first);
        if (r < 0)
                goto err_free_fence_array;

out:
        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;

        if (first < fence_count && array[first])
                r = array[first]->error;
        else
                r = 0;

err_free_fence_array:
        for (i = 0; i < fence_count; i++)
                dma_fence_put(array[i]);
        kfree(array);

        return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;

        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                               GFP_KERNEL);
        if (fences == NULL)
                return -ENOMEM;

        fences_user = u64_to_user_ptr(wait->in.fences);
        if (copy_from_user(fences, fences_user,
                           sizeof(struct drm_amdgpu_fence) * fence_count)) {
                r = -EFAULT;
                goto err_free_fences;
        }

        if (wait->in.wait_all)
                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
        else
                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
        kfree(fences);

        return r;
}

/**
 * amdgpu_cs_find_mapping - find the BO and VA mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting VA mapping found
 *
 * Search the buffer objects of the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a mapping
 * is found, a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **map)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        addr /= AMDGPU_GPU_PAGE_SIZE;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return -EINVAL;

        *bo = mapping->bo_va->base.bo;
        *map = mapping;

        /* Double check that the BO is reserved by this CS */
        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;

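        /* If the BO isn't marked as contiguous yet, force the flag and
         * revalidate its placement before allocating GART space for it below.
         */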
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
        }

        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}