/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
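
/*
 * Sizing example (illustrative values, not taken from any particular asic):
 * with max_pfn = 1 << 20 (a 4GB address space of 4KB GPU pages) and
 * radeon_vm_block_size = 9, radeon_vm_num_pdes() returns 1 << 11 = 2048
 * page directory entries, so radeon_vm_directory_size() is
 * RADEON_GPU_PAGE_ALIGN(2048 * 8) = 16KB for the page directory BO.
 */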

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	list = kvmalloc_array(vm->max_pde_used + 2,
			      sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.num_shared = 1;
	list[0].tiling_flags = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.num_shared = 1;
		list[idx].tiling_flags = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
		return NULL;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_radeon_vm_grab_id(i, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm_id->id = choices[i];
			trace_radeon_vm_grab_id(choices[i], ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 * @updates: last vm update that is waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring, struct radeon_fence *updates)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {

		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
		radeon_fence_unref(&vm_id->flushed_updates);
		vm_id->flushed_updates = radeon_fence_ref(updates);
		vm_id->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
				     vm_id->id, vm_id->pd_gpu_addr);

	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	unsigned vm_id = vm->ids[fence->ring].id;

	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm)
			return bo_va;
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->it.start = 0;
	bo_va->it.last = 0;
	bo_va->flags = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_set_pages - helper to call the right asic function
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);

	} else {
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}
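
/*
 * Usage sketch (hypothetical values, mirroring how the PD/PT update code
 * below drives this helper): writing 512 consecutive 4KB VRAM mappings
 * starting at GPU address 0x100000 into the page table entries at bus
 * address pe would be a single call:
 *
 *	radeon_vm_set_pages(rdev, &ib, pe, 0x100000, 512,
 *			    RADEON_GPU_PAGE_SIZE, R600_PTE_VALID);
 *
 * With R600_PTE_SYSTEM in the flags the same call takes the write-pages
 * path, and when every bit of R600_PTE_GART_MASK is set the entries are
 * copied straight out of the GART table instead.
 */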

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 *
 * Returns 0 on success, negative error code on failure.
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error_unreserve;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
	if (r)
		goto error_unreserve;

	ib.length_dw = 0;

	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > 64);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		goto error_free;

	ib.fence->is_vm_update = true;
	radeon_bo_fence(bo, ib.fence, false);

error_free:
	radeon_ib_free(rdev, &ib);

error_unreserve:
	radeon_bo_unreserve(bo);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	struct radeon_vm *vm = bo_va->vm;
	unsigned last_pfn, pt_idx;
	uint64_t eoffset;
	int r;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size - 1;
		if (soffset >= eoffset) {
			r = -EINVAL;
			goto error_unreserve;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn >= rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			r = -EINVAL;
			goto error_unreserve;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	soffset /= RADEON_GPU_PAGE_SIZE;
	eoffset /= RADEON_GPU_PAGE_SIZE;
	if (soffset || eoffset) {
		struct interval_tree_node *it;
		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
		if (it && it != &bo_va->it) {
			struct radeon_bo_va *tmp;
			tmp = container_of(it, struct radeon_bo_va, it);
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
				soffset, tmp->bo, tmp->it.start, tmp->it.last);
			mutex_unlock(&vm->mutex);
			r = -EINVAL;
			goto error_unreserve;
		}
	}

	if (bo_va->it.start || bo_va->it.last) {
		/* add a clone of the bo_va to clear the old address */
		struct radeon_bo_va *tmp;
		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
		if (!tmp) {
			mutex_unlock(&vm->mutex);
			r = -ENOMEM;
			goto error_unreserve;
		}
		tmp->it.start = bo_va->it.start;
		tmp->it.last = bo_va->it.last;
		tmp->vm = vm;
		tmp->bo = radeon_bo_ref(bo_va->bo);

		interval_tree_remove(&bo_va->it, &vm->va);
		spin_lock(&vm->status_lock);
		bo_va->it.start = 0;
		bo_va->it.last = 0;
		list_del_init(&bo_va->vm_status);
		list_add(&tmp->vm_status, &vm->freed);
		spin_unlock(&vm->status_lock);
	}

	if (soffset || eoffset) {
		spin_lock(&vm->status_lock);
		bo_va->it.start = soffset;
		bo_va->it.last = eoffset;
		list_add(&bo_va->vm_status, &vm->cleared);
		spin_unlock(&vm->status_lock);
		interval_tree_insert(&bo_va->it, &vm->va);
	}

	bo_va->flags = flags;

	soffset >>= radeon_vm_block_size;
	eoffset >>= radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     NULL, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_unreserve:
	radeon_bo_unreserve(bo_va->bo);
	return r;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;

	return result;
}
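
/*
 * Lookup example (hypothetical numbers): for addr = 0x5000 with 4KB GPU
 * pages, addr >> RADEON_GPU_PAGE_SHIFT selects GART slot 5, and masking
 * off the low page bits of the stored entry yields the page-aligned bus
 * address that slot currently points at.
 */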

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}
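
/*
 * Translation example: a snooped system-memory mapping submitted as
 * RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SYSTEM |
 * RADEON_VM_PAGE_SNOOPED comes back as
 * R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_SYSTEM | R600_PTE_SNOOPED.
 * RADEON_VM_PAGE_SNOOPED on its own (without RADEON_VM_PAGE_SYSTEM) is
 * dropped, since snooping only applies to system pages.
 */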

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_vm_set_pages(rdev, &ib, last_pde,
						    last_pt, count, incr,
						    R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
				    incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_asic_vm_pad_ib(rdev, &ib);

		radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
		WARN_ON(ib.length_dw > ndw);
		r = radeon_ib_schedule(rdev, &ib, NULL, false);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		ib.fence->is_vm_update = true;
		radeon_bo_fence(pd, ib.fence, false);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ?
			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
	}
}
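
/*
 * Worked example (illustrative numbers, SI-style 64KB fragments where
 * frag_align = 0x80 bytes of PTEs = 16 pages): for pe_start = 0x30 and
 * pe_end = 0x230, frag_start is ALIGN(0x30, 0x80) = 0x80 and frag_end is
 * 0x230 & ~0x7f = 0x200. The helper then emits three runs: 0x30-0x80 as
 * plain 4KB PTEs, 0x80-0x200 with R600_PTE_FRAG_64KB or'ed into the flags,
 * and 0x200-0x230 as plain 4KB PTEs again.
 */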

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to use for the update
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_ptes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
		r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}

	return 0;
}

/**
 * radeon_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_fence_pts(struct radeon_vm *vm,
				uint64_t start, uint64_t end,
				struct radeon_fence *fence)
{
	unsigned i;

	start >>= radeon_vm_block_size;
	end = (end - 1) >> radeon_vm_block_size;

	for (i = start; i <= end; ++i)
		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}
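
/*
 * Indexing example (hypothetical values): with radeon_vm_block_size = 9,
 * a range of start = 0x1f0, end = 0x210 covers page table indices
 * 0x1f0 >> 9 = 0 through (0x210 - 1) >> 9 = 1, so both affected page
 * table BOs get the update fence attached.
 */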

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_resource *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ncmds, ndw;
	uint64_t addr;
	uint32_t flags;
	int r;

	if (!bo_va->it.start) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	spin_lock(&vm->status_lock);
	if (mem) {
		if (list_empty(&bo_va->vm_status)) {
			spin_unlock(&vm->status_lock);
			return 0;
		}
		list_del_init(&bo_va->vm_status);
	} else {
		list_del(&bo_va->vm_status);
		list_add(&bo_va->vm_status, &vm->cleared);
	}
	spin_unlock(&vm->status_lock);

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
	if (bo_va->bo && radeon_ttm_tt_is_readonly(rdev, bo_va->bo->tbo.ttm))
		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM)
			bo_va->flags |= RADEON_VM_PAGE_VALID;

		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;

		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = bo_va->it.last - bo_va->it.start + 1;

	/* reserve space for one command every (1 << BLOCK_SIZE) entries
	   or 2k dwords (whatever is smaller) */
	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	flags = radeon_vm_page_flags(bo_va->flags);
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & R600_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
		unsigned i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
	}

	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
				  bo_va->it.last + 1, addr,
				  radeon_vm_page_flags(bo_va->flags));
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > ndw);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	ib.fence->is_vm_update = true;
	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
	radeon_fence_unref(&bo_va->last_pt_update);
	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;
}
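
/*
 * Sizing example for the IB estimate above (illustrative numbers): a 1MB
 * VRAM mapping is nptes = 256 entries. With radeon_vm_block_size = 9 that
 * gives ncmds = (256 >> 9) + 1 = 1 command, so the set-pages path reserves
 * ndw = 64 + 1 * 10 + 2 * 10 = 94 dwords, well under the 0xfffff limit for
 * a single IB.
 */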

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->freed)) {
		bo_va = list_first_entry(&vm->freed,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		spin_lock(&vm->status_lock);
		list_del(&bo_va->vm_status);
		kfree(bo_va);
		if (r)
			break;
	}
	spin_unlock(&vm->status_lock);
	return r;
}

/**
 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	if (bo_va->it.start || bo_va->it.last)
		interval_tree_remove(&bo_va->it, &vm->va);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	if (bo_va->it.start || bo_va->it.last) {
		bo_va->bo = radeon_bo_ref(bo_va->bo);
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}
	spin_unlock(&vm->status_lock);

	mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status) &&
		    (bo_va->it.start || bo_va->it.last))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	vm->ib_bo_va = NULL;
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT_CACHED;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);
	INIT_LIST_HEAD(&vm->cleared);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!RB_EMPTY_ROOT(&vm->va.rb_root))
		dev_err(rdev->dev, "still active bo inside vm\n");

	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
					     &vm->va.rb_root, it.rb) {
		interval_tree_remove(&bo_va->it, &vm->va);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			radeon_fence_unref(&bo_va->last_pt_update);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		radeon_fence_unref(&vm->ids[i].flushed_updates);
		radeon_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}