// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;
        struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

        dma_resv_assert_held(res->backup->base.base.resv);
        res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
                res->func->prio;

        while (*new) {
                struct vmw_resource *this =
                        container_of(*new, struct vmw_resource, mob_node);

                parent = *new;
                new = (res->backup_offset < this->backup_offset) ?
                        &((*new)->rb_left) : &((*new)->rb_right);
        }

        rb_link_node(&res->mob_node, parent, new);
        rb_insert_color(&res->mob_node, &backup->res_tree);

        vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;

        dma_resv_assert_held(backup->base.base.resv);
        if (vmw_resource_mob_attached(res)) {
                rb_erase(&res->mob_node, &backup->res_tree);
                RB_CLEAR_NODE(&res->mob_node);
                vmw_bo_prio_del(backup, res->used_prio);
        }
}

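/*
 * Illustrative sketch (not part of the driver): both helpers above walk
 * or modify the backing buffer's resource rb-tree and therefore require
 * the buffer object to be reserved, as the dma_resv_assert_held() checks
 * document. A hypothetical caller would bracket them like this:
 *
 *	ttm_bo_reserve(&res->backup->base, false, false, NULL);
 *	vmw_resource_mob_attach(res);
 *	...
 *	vmw_resource_mob_detach(res);
 *	ttm_bo_unreserve(&res->backup->base);
 */
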
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

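/*
 * Illustrative sketch (assumed caller pattern): vmw_resource_reference()
 * pairs with vmw_resource_unreference(), which also clears the caller's
 * pointer. The _unless_doomed() variant is for lookups that may race with
 * the final kref_put():
 *
 *	struct vmw_resource *tmp = vmw_resource_reference_unless_doomed(res);
 *
 *	if (tmp) {
 *		...
 *		vmw_resource_unreference(&tmp);
 *	}
 *
 * After the unreference call, tmp is NULL.
 */
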
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
                container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (vmw_resource_mob_attached(res) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                vmw_resource_mob_detach(res);
                if (res->dirty)
                        res->func->dirty_free(res);
                if (res->coherent)
                        vmw_bo_dirty_release(res->backup);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}


/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

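/*
 * Illustrative note: the idr_preload()/GFP_NOWAIT pairing above is the
 * standard idiom for allocating an idr entry under a spinlock; the
 * preload performs the sleeping allocation up front so that the locked
 * idr_alloc() cannot block. A minimal sketch of the same pattern, with
 * hypothetical lock and idr names:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */
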
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        RB_CLEAR_NODE(&res->mob_node);
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        res->coherent = false;
        res->used_prio = 3;
        res->dirty = NULL;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}


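/*
 * Illustrative sketch of a typical vmw_resource_init() caller (the
 * resource type, destructor and function table names are hypothetical,
 * not part of this file):
 *
 *	res = kzalloc(sizeof(*res), GFP_KERNEL);
 *	if (!res)
 *		return -ENOMEM;
 *
 *	ret = vmw_resource_init(dev_priv, res, true, my_res_free,
 *				&my_res_func);
 *	if (ret) {
 *		kfree(res);
 *		return ret;
 *	}
 */
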
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

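/*
 * Illustrative sketch (assumed ioctl-side usage): the resource returned
 * through @p_res is refcounted and must eventually be released with
 * vmw_resource_unreference():
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_resource_unreference(&res);
 */
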
/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 *
 * Unlike vmw_user_resource_lookup_handle(), this function returns a
 * non-refcounted pointer that is only valid until the lookup is released
 * again. If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it
 * is associated with an incorrect resource type, ERR_PTR(-EINVAL).
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}

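/*
 * Illustrative note: a hypothetical caller of the noref lookup ends the
 * critical section with ttm_base_object_noref_release(), mirroring the
 * error path inside the function above:
 *
 *	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile,
 *						    handle, converter);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	...
 *	ttm_base_object_noref_release();
 */
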
/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf must be NULL on entry.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                          res->func->backup_placement,
                          interruptible,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 * @dirtying: Whether the caller is about to perform a GPU operation that
 * will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf,
                                    bool dirtying)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && !vmw_resource_mob_attached(res) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        vmw_resource_mob_attach(res);
        }

        /*
         * Handle the case where the backup mob is marked coherent but
         * the resource isn't.
         */
        if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
            !res->coherent) {
                if (res->backup->dirty && !res->dirty) {
                        ret = func->dirty_alloc(res);
                        if (ret)
                                return ret;
                } else if (!res->backup->dirty && res->dirty) {
                        func->dirty_free(res);
                }
        }

        /*
         * Transfer the dirty regions to the resource and update
         * the resource.
         */
        if (res->dirty) {
                if (dirtying && !res->res_dirty) {
                        pgoff_t start = res->backup_offset >> PAGE_SHIFT;
                        pgoff_t end = __KERNEL_DIV_ROUND_UP
                                (res->backup_offset + res->backup_size,
                                 PAGE_SIZE);

                        vmw_bo_dirty_unmap(res->backup, start, end);
                }

                vmw_bo_dirty_transfer_to_res(res);
                return func->dirty_sync(res);
        }

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @dirty_set: Change dirty status of the resource.
 * @dirty: When changing dirty status indicates the new status.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool dirty_set,
                            bool dirty,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        vmw_resource_mob_detach(res);
                        if (res->coherent)
                                vmw_bo_dirty_release(res->backup);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);

                        /*
                         * The validation code should already have added a
                         * dirty tracker here.
                         */
                        WARN_ON(res->coherent && !new_backup->dirty);

                        vmw_resource_mob_attach(res);
                } else {
                        res->backup = NULL;
                }
        } else if (switch_backup && res->coherent) {
                vmw_bo_dirty_release(res->backup);
        }

        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (dirty_set)
                res->res_dirty = dirty;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket: The ww acquire context to use, or NULL if trylocking.
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && !vmw_resource_mob_attached(res))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 * @interruptible: Whether any sleeps should be performed while interruptible.
 * @no_backup: If true, don't allocate a backup buffer even if one is needed.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

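/*
 * Illustrative sketch of the reserve/validate/unreserve life cycle this
 * function takes part in (simplified; error handling and the backup
 * buffer reservation that validation additionally needs are elided, see
 * vmw_resource_pin() below for a complete in-tree example):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	ret = vmw_resource_validate(res, true, false);
 *	...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */
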
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @ticket: The ww acquire ctx used for reservation.
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                vmw_resource_mob_detach(res);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
                          bool dirtying)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf, dirtying);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                WARN_ON_ONCE(vmw_resource_mob_attached(res));
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}


/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        dma_resv_assert_held(vbo->base.base.resv);
        while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
                struct rb_node *node = vbo->res_tree.rb_node;
                struct vmw_resource *res =
                        container_of(node, struct vmw_resource, mob_node);

                if (!WARN_ON_ONCE(!res->func->unbind))
                        (void) res->func->unbind(res, res->res_dirty, &val_buf);

                res->backup_dirty = true;
                res->res_dirty = false;
                vmw_resource_mob_detach(res);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}


/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv = dx_query_ctx->dev_priv;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}


/**
 * vmw_query_move_notify - Read back cached query states before a buffer move
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_resource *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed while interruptible.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible, true);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

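/*
 * Illustrative sketch: pin references nest, so every successful
 * vmw_resource_pin() call must be balanced by exactly one
 * vmw_resource_unpin():
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmw_resource_unpin(res);
 *
 * Between the two calls the resource id is stable and the resource
 * cannot be evicted.
 */
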
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
                               pgoff_t end)
{
        if (res->dirty)
                res->func->dirty_range_add(res, start << PAGE_SHIFT,
                                           end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                        pgoff_t end, pgoff_t *num_prefault)
{
        struct rb_node *cur = vbo->res_tree.rb_node;
        struct vmw_resource *found = NULL;
        unsigned long res_start = start << PAGE_SHIFT;
        unsigned long res_end = end << PAGE_SHIFT;
        unsigned long last_cleaned = 0;

        /*
         * Find the resource with lowest backup_offset that intersects the
         * range.
         */
        while (cur) {
                struct vmw_resource *cur_res =
                        container_of(cur, struct vmw_resource, mob_node);

                if (cur_res->backup_offset >= res_end) {
                        cur = cur->rb_left;
                } else if (cur_res->backup_offset + cur_res->backup_size <=
                           res_start) {
                        cur = cur->rb_right;
                } else {
                        found = cur_res;
                        cur = cur->rb_left;
                        /* Continue to look for resources with lower offsets */
                }
        }

        /*
         * In order of increasing backup_offset, clean dirty resources
         * intersecting the range.
         */
        while (found) {
                if (found->res_dirty) {
                        int ret;

                        if (!found->func->clean)
                                return -EINVAL;

                        ret = found->func->clean(found);
                        if (ret)
                                return ret;

                        found->res_dirty = false;
                }
                last_cleaned = found->backup_offset + found->backup_size;
                cur = rb_next(&found->mob_node);
                if (!cur)
                        break;

                found = container_of(cur, struct vmw_resource, mob_node);
                if (found->backup_offset >= res_end)
                        break;
        }

        /*
         * Set number of pages allowed prefaulting and fence the buffer object
         */
        *num_prefault = 1;
        if (last_cleaned > res_start) {
                struct ttm_buffer_object *bo = &vbo->base;

                *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
                                                      PAGE_SIZE);
                vmw_bo_fence_single(bo, NULL);
                if (bo->moving)
                        dma_fence_put(bo->moving);
                bo->moving = dma_fence_get
                        (dma_resv_get_excl(bo->base.resv));
        }

        return 0;
}