// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/slab.h>
#include "vmwgfx_validation.h"
#include "vmwgfx_drv.h"

/**
 * struct vmw_validation_bo_node - Buffer object validation metadata.
 * @base: Metadata used for TTM reservation and validation.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @coherent_count: If switching backup buffers, number of new coherent
 * resources that will have this buffer as a backup buffer.
 * @as_mob: Validate as mob.
 * @cpu_blit: Validate for cpu blit access.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_bo_node {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	unsigned int coherent_count;
	u32 as_mob : 1;
	u32 cpu_blit : 1;
};

/**
 * struct vmw_validation_res_node - Resource validation metadata.
 * @head: List head for the resource validation list.
 * @hash: A hash entry used for the duplicate detection hash table.
 * @res: Reference counted resource pointer.
 * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
 * to a resource.
 * @new_backup_offset: Offset into the new backup mob for resources that can
 * share MOBs.
 * @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
 * the command stream provides a mob bind operation.
 * @switching_backup: The validation process is switching backup MOB.
 * @first_usage: True iff the resource has been seen only once in the current
 * validation batch.
 * @reserved: Whether the resource is currently reserved by this process.
 * @dirty: Whether the resource should be registered as dirty (set) or
 * clean (cleared) when unreserving.
 * @dirty_set: Whether @dirty contains valid information to be applied.
 * @private: Optionally additional memory for caller-private data.
 *
 * Bit fields are used since these structures are allocated and freed in
 * large numbers and space conservation is desired.
 */
struct vmw_validation_res_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_buffer_object *new_backup;
	unsigned long new_backup_offset;
	u32 no_buffer_needed : 1;
	u32 switching_backup : 1;
	u32 first_usage : 1;
	u32 reserved : 1;
	u32 dirty : 1;
	u32 dirty_set : 1;
	unsigned long private[0];
};

/**
 * vmw_validation_mem_alloc - Allocate kernel memory from the validation
 * context based allocator
 * @ctx: The validation context
 * @size: The number of bytes to allocate.
 *
 * The memory allocated may not exceed PAGE_SIZE, and the returned
 * address is aligned to sizeof(long). All memory allocated this way is
 * reclaimed after validation when calling any of the exported functions:
 * vmw_validation_unref_lists()
 * vmw_validation_revert()
 * vmw_validation_done()
 *
 * Return: Pointer to the allocated memory on success. NULL on failure.
 */
void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	void *addr;

	size = vmw_validation_align(size);
	if (size > PAGE_SIZE)
		return NULL;

	if (ctx->mem_size_left < size) {
		struct page *page;

		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);

			if (ret)
				return NULL;

			ctx->vm_size_left += ctx->vm->gran;
			ctx->total_mem += ctx->vm->gran;
		}

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return NULL;

		if (ctx->vm)
			ctx->vm_size_left -= PAGE_SIZE;

		list_add_tail(&page->lru, &ctx->page_list);
		ctx->page_address = page_address(page);
		ctx->mem_size_left = PAGE_SIZE;
	}

	addr = (void *) (ctx->page_address + (PAGE_SIZE - ctx->mem_size_left));
	ctx->mem_size_left -= size;

	return addr;
}
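
/*
 * Usage sketch (illustrative only, not part of the driver): callers
 * suballocate small per-validation nodes from the context and never free
 * them individually; everything is reclaimed in bulk at context teardown.
 * The "my_node" type below is hypothetical.
 *
 *	struct my_node *node;
 *
 *	node = vmw_validation_mem_alloc(ctx, sizeof(*node));
 *	if (!node)
 *		return -ENOMEM;
 *	// No kfree() counterpart exists; the backing pages are released by
 *	// vmw_validation_unref_lists(), vmw_validation_revert() or
 *	// vmw_validation_done().
 */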

/**
 * vmw_validation_mem_free - Free all memory allocated using
 * vmw_validation_mem_alloc()
 * @ctx: The validation context
 *
 * All memory previously allocated for this context using
 * vmw_validation_mem_alloc() is freed.
 */
static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
{
	struct page *entry, *next;

	list_for_each_entry_safe(entry, next, &ctx->page_list, lru) {
		list_del_init(&entry->lru);
		__free_page(entry);
	}

	ctx->mem_size_left = 0;
	if (ctx->vm && ctx->total_mem) {
		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
		ctx->total_mem = 0;
		ctx->vm_size_left = 0;
	}
}

/**
 * vmw_validation_find_bo_dup - Find a duplicate buffer object entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @vbo: The buffer object to search for.
 *
 * Return: Pointer to the struct vmw_validation_bo_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
			   struct vmw_buffer_object *vbo)
{
	struct vmw_validation_bo_node *bo_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
			bo_node = container_of(hash, typeof(*bo_node), hash);
	} else {
		struct vmw_validation_bo_node *entry;

		list_for_each_entry(entry, &ctx->bo_list, base.head) {
			if (entry->base.bo == &vbo->base) {
				bo_node = entry;
				break;
			}
		}
	}

	return bo_node;
}

/**
 * vmw_validation_find_res_dup - Find a duplicate resource entry in the
 * validation context's lists.
 * @ctx: The validation context to search.
 * @res: The resource to search for.
 *
 * Return: Pointer to the struct vmw_validation_res_node referencing the
 * duplicate, or NULL if none found.
 */
static struct vmw_validation_res_node *
vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
			    struct vmw_resource *res)
{
	struct vmw_validation_res_node *res_node = NULL;

	if (!ctx->merge_dups)
		return NULL;

	if (ctx->ht) {
		struct drm_hash_item *hash;

		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
			res_node = container_of(hash, typeof(*res_node), hash);
	} else {
		struct vmw_validation_res_node *entry;

		list_for_each_entry(entry, &ctx->resource_ctx_list, head) {
			if (entry->res == res) {
				res_node = entry;
				goto out;
			}
		}

		list_for_each_entry(entry, &ctx->resource_list, head) {
			if (entry->res == res) {
				res_node = entry;
				break;
			}
		}
	}
out:
	return res_node;
}

/**
 * vmw_validation_add_bo - Add a buffer object to the validation context.
 * @ctx: The validation context.
 * @vbo: The buffer object.
 * @as_mob: Validate as mob, otherwise suitable for GMR operations.
 * @cpu_blit: Validate in a page-mappable location.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
			  struct vmw_buffer_object *vbo,
			  bool as_mob,
			  bool cpu_blit)
{
	struct vmw_validation_bo_node *bo_node;

	bo_node = vmw_validation_find_bo_dup(ctx, vbo);
	if (bo_node) {
		if (bo_node->as_mob != as_mob ||
		    bo_node->cpu_blit != cpu_blit) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
	} else {
		struct ttm_validate_buffer *val_buf;
		int ret;

		bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
		if (!bo_node)
			return -ENOMEM;

		if (ctx->ht) {
			bo_node->hash.key = (unsigned long) vbo;
			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
			if (ret) {
				DRM_ERROR("Failed to initialize a buffer "
					  "validation entry.\n");
				return ret;
			}
		}
		val_buf = &bo_node->base;
		val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
		if (!val_buf->bo)
			return -ESRCH;
		val_buf->num_shared = 0;
		list_add_tail(&val_buf->head, &ctx->bo_list);
		bo_node->as_mob = as_mob;
		bo_node->cpu_blit = cpu_blit;
	}

	return 0;
}
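
/*
 * Usage sketch (illustrative): registering the same buffer twice is cheap
 * when the context merges duplicates, but the usage flags must then match
 * on every call:
 *
 *	ret = vmw_validation_add_bo(ctx, vbo, true, false);	// as a MOB
 *	if (ret)
 *		return ret;
 *	// A second call for the same vbo with as_mob == false would now
 *	// fail with -EINVAL ("Inconsistent buffer usage").
 */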

/**
 * vmw_validation_add_resource - Add a resource to the validation context.
 * @ctx: The validation context.
 * @res: The resource.
 * @priv_size: Size of private, additional metadata.
 * @dirty: Whether to change dirty status.
 * @p_node: Output pointer of additional metadata address.
 * @first_usage: Whether this was the first time this resource was seen.
 *
 * Return: Zero on success, negative error code otherwise.
 */
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
				struct vmw_resource *res,
				size_t priv_size,
				u32 dirty,
				void **p_node,
				bool *first_usage)
{
	struct vmw_validation_res_node *node;
	int ret;

	node = vmw_validation_find_res_dup(ctx, res);
	if (node) {
		node->first_usage = 0;
		goto out_fill;
	}

	node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
	if (!node) {
		VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
		return -ENOMEM;
	}

	if (ctx->ht) {
		node->hash.key = (unsigned long) res;
		ret = drm_ht_insert_item(ctx->ht, &node->hash);
		if (ret) {
			DRM_ERROR("Failed to initialize a resource validation "
				  "entry.\n");
			return ret;
		}
	}
	node->res = vmw_resource_reference_unless_doomed(res);
	if (!node->res)
		return -ESRCH;

	node->first_usage = 1;
	if (!res->dev_priv->has_mob) {
		list_add_tail(&node->head, &ctx->resource_list);
	} else {
		switch (vmw_res_type(res)) {
		case vmw_res_context:
		case vmw_res_dx_context:
			list_add(&node->head, &ctx->resource_ctx_list);
			break;
		case vmw_res_cotable:
			list_add_tail(&node->head, &ctx->resource_ctx_list);
			break;
		default:
			list_add_tail(&node->head, &ctx->resource_list);
			break;
		}
	}

out_fill:
	if (dirty) {
		node->dirty_set = 1;
		/* Overwriting previous information here is intentional! */
		node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
	}
	if (first_usage)
		*first_usage = node->first_usage;
	if (p_node)
		*p_node = &node->private;

	return 0;
}
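
/*
 * Usage sketch (illustrative): @priv_size reserves caller-private space
 * directly behind the node, returned through @p_node. The metadata type
 * and init helper below are hypothetical:
 *
 *	struct my_res_meta *meta;
 *	bool first;
 *
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(*meta),
 *					  VMW_RES_DIRTY_SET,
 *					  (void **)&meta, &first);
 *	if (!ret && first)
 *		my_res_meta_init(meta);
 */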

/**
 * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @dirty: Dirty information VMW_RES_DIRTY_XX.
 */
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
				  void *val_private, u32 dirty)
{
	struct vmw_validation_res_node *val;

	if (!dirty)
		return;

	val = container_of(val_private, typeof(*val), private);
	val->dirty_set = 1;
	/* Overwriting previous information here is intentional! */
	val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
}

/**
 * vmw_validation_res_switch_backup - Register a backup MOB switch during
 * validation.
 * @ctx: The validation context.
 * @val_private: The additional meta-data pointer returned when the
 * resource was registered with the validation context. Used to identify
 * the resource.
 * @vbo: The new backup buffer object MOB. This buffer object needs to have
 * already been registered with the validation context.
 * @backup_offset: Offset into the new backup MOB.
 */
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
				      void *val_private,
				      struct vmw_buffer_object *vbo,
				      unsigned long backup_offset)
{
	struct vmw_validation_res_node *val;

	val = container_of(val_private, typeof(*val), private);

	val->switching_backup = 1;
	if (val->first_usage)
		val->no_buffer_needed = 1;

	val->new_backup = vbo;
	val->new_backup_offset = backup_offset;
}

/**
 * vmw_validation_res_reserve - Reserve all resources registered with this
 * validation context.
 * @ctx: The validation context.
 * @intr: Use interruptible waits when possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
			       bool intr)
{
	struct vmw_validation_res_node *val;
	int ret = 0;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, intr, val->no_buffer_needed);
		if (ret)
			goto out_unreserve;

		val->reserved = 1;
		if (res->backup) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				goto out_unreserve;
		}

		if (val->switching_backup && val->new_backup &&
		    res->coherent) {
			struct vmw_validation_bo_node *bo_node =
				vmw_validation_find_bo_dup(ctx,
							   val->new_backup);

			if (WARN_ON(!bo_node)) {
				ret = -EINVAL;
				goto out_unreserve;
			}
			bo_node->coherent_count++;
		}
	}

	return 0;

out_unreserve:
	vmw_validation_res_unreserve(ctx, true);
	return ret;
}

/**
 * vmw_validation_res_unreserve - Unreserve all reserved resources
 * registered with this validation context.
 * @ctx: The validation context.
 * @backoff: Whether this is a backoff- or a commit-type operation. This
 * is used to determine whether to switch backup MOBs or not.
 */
void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
				  bool backoff)
{
	struct vmw_validation_res_node *val;

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	if (backoff)
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       false, false, false,
						       NULL, 0);
		}
	else
		list_for_each_entry(val, &ctx->resource_list, head) {
			if (val->reserved)
				vmw_resource_unreserve(val->res,
						       val->dirty_set,
						       val->dirty,
						       val->switching_backup,
						       val->new_backup,
						       val->new_backup_offset);
		}
}

/**
 * vmw_validation_bo_validate_single - Validate a single buffer object.
 * @bo: The TTM buffer object base.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @validate_as_mob: Whether to validate in MOB memory.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
 * code on failure.
 */
int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob)
{
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	int ret;

	if (atomic_read(&vbo->cpu_writers))
		return -EBUSY;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (ret == 0 || ret == -ERESTARTSYS)
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
	return ret;
}

/**
 * vmw_validation_bo_validate - Validate all buffer objects registered with
 * the validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_bo_node *entry;
	int ret;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		struct vmw_buffer_object *vbo =
			container_of(entry->base.bo, typeof(*vbo), base);

		if (entry->cpu_blit) {
			struct ttm_operation_ctx ctx = {
				.interruptible = intr,
				.no_wait_gpu = false
			};

			ret = ttm_bo_validate(entry->base.bo,
					      &vmw_nonfixed_placement, &ctx);
		} else {
			ret = vmw_validation_bo_validate_single
				(entry->base.bo, intr, entry->as_mob);
		}
		if (ret)
			return ret;

		/*
		 * Rather than having the resource code allocate the bo
		 * dirty tracker in resource_unreserve(), where we can't fail,
		 * do it here when validating the buffer object.
		 */
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;

			while (coherent_count) {
				ret = vmw_bo_dirty_add(vbo);
				if (ret)
					return ret;

				coherent_count--;
			}
			entry->coherent_count -= coherent_count;
		}

		if (vbo->dirty)
			vmw_bo_dirty_scan(vbo);
	}
	return 0;
}

/**
 * vmw_validation_res_validate - Validate all resources registered with the
 * validation context.
 * @ctx: The validation context.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted,
 * negative error code on failure.
 */
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
{
	struct vmw_validation_res_node *val;
	int ret;

	list_for_each_entry(val, &ctx->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_buffer_object *backup = res->backup;

		ret = vmw_resource_validate(res, intr, val->dirty_set &&
					    val->dirty);
		if (ret) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_buffer_object *vbo = res->backup;

			ret = vmw_validation_add_bo
				(ctx, vbo, vmw_resource_needs_backup(res),
				 false);
			if (ret)
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_validation_drop_ht - Reset the hash table used for duplicate finding
 * and unregister it from this validation context.
 * @ctx: The validation context.
 *
 * The hash table used for duplicate finding is an expensive resource and
 * may be protected by mutexes that may cause deadlocks during resource
 * unreferencing if held. After resource- and buffer object registering,
 * there is no longer any use for this hash table, so allow freeing it
 * either to shorten any mutex locking time, or before resources- and
 * buffer objects are freed during validation context cleanup.
 */
void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	if (!ctx->ht)
		return;

	list_for_each_entry(entry, &ctx->bo_list, base.head)
		(void) drm_ht_remove_item(ctx->ht, &entry->hash);

	list_for_each_entry(val, &ctx->resource_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	list_for_each_entry(val, &ctx->resource_ctx_list, head)
		(void) drm_ht_remove_item(ctx->ht, &val->hash);

	ctx->ht = NULL;
}

/**
 * vmw_validation_unref_lists - Unregister previously registered buffer
 * objects and resources.
 * @ctx: The validation context.
 *
 * Note that this function may cause buffer object- and resource destructors
 * to be invoked.
 */
void vmw_validation_unref_lists(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;
	struct vmw_validation_res_node *val;

	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		ttm_bo_put(entry->base.bo);
		entry->base.bo = NULL;
	}

	list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
	list_for_each_entry(val, &ctx->resource_list, head)
		vmw_resource_unreference(&val->res);

	/*
	 * No need to detach each list entry since they are all freed with
	 * vmw_validation_mem_free(). Just make them inaccessible.
	 */
	INIT_LIST_HEAD(&ctx->bo_list);
	INIT_LIST_HEAD(&ctx->resource_list);

	vmw_validation_mem_free(ctx);
}

/**
 * vmw_validation_prepare - Prepare a validation context for command
 * submission.
 * @ctx: The validation context.
 * @mutex: The mutex used to protect resource reservation.
 * @intr: Whether to perform waits interruptible if possible.
 *
 * Note that the single reservation mutex @mutex is an unfortunate
 * construct. Ideally resource reservation should be moved to per-resource
 * ww_mutexes.
 * If this function doesn't return zero to indicate success, all resources
 * are left unreserved but still referenced.
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on error.
 */
int vmw_validation_prepare(struct vmw_validation_context *ctx,
			   struct mutex *mutex,
			   bool intr)
{
	int ret = 0;

	if (mutex) {
		if (intr)
			ret = mutex_lock_interruptible(mutex);
		else
			mutex_lock(mutex);
		if (ret)
			return -ERESTARTSYS;
	}

	ctx->res_mutex = mutex;
	ret = vmw_validation_res_reserve(ctx, intr);
	if (ret)
		goto out_no_res_reserve;

	ret = vmw_validation_bo_reserve(ctx, intr);
	if (ret)
		goto out_no_bo_reserve;

	ret = vmw_validation_bo_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	ret = vmw_validation_res_validate(ctx, intr);
	if (ret)
		goto out_no_validate;

	return 0;

out_no_validate:
	vmw_validation_bo_backoff(ctx);
out_no_bo_reserve:
	vmw_validation_res_unreserve(ctx, true);
out_no_res_reserve:
	if (mutex)
		mutex_unlock(mutex);

	return ret;
}
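
/*
 * Lifecycle sketch (illustrative): a minimal command-submission path built
 * on this file, assuming DECLARE_VAL_CONTEXT() from vmwgfx_validation.h and
 * an already looked-up vbo and fence:
 *
 *	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 *
 *	ret = vmw_validation_add_bo(&val_ctx, vbo, false, false);
 *	if (ret)
 *		goto out_unref;
 *	ret = vmw_validation_prepare(&val_ctx, NULL, true);
 *	if (ret)
 *		goto out_unref;
 *	// ... build and submit the command stream, obtain @fence ...
 *	vmw_validation_done(&val_ctx, fence);
 *	// On submission failure, call vmw_validation_revert() instead.
 *	return 0;
 * out_unref:
 *	vmw_validation_unref_lists(&val_ctx);
 *	return ret;
 */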

/**
 * vmw_validation_revert - Revert validation actions if command submission
 * failed.
 *
 * @ctx: The validation context.
 *
 * The caller still needs to unref resources after a call to this function.
 */
void vmw_validation_revert(struct vmw_validation_context *ctx)
{
	vmw_validation_bo_backoff(ctx);
	vmw_validation_res_unreserve(ctx, true);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_done - Commit validation actions after command submission
 * success.
 * @ctx: The validation context.
 * @fence: Fence with which to fence all buffer objects taking part in the
 * command submission.
 *
 * The caller does NOT need to unref resources after a call to this function.
 */
void vmw_validation_done(struct vmw_validation_context *ctx,
			 struct vmw_fence_obj *fence)
{
	vmw_validation_bo_fence(ctx, fence);
	vmw_validation_res_unreserve(ctx, false);
	if (ctx->res_mutex)
		mutex_unlock(ctx->res_mutex);
	vmw_validation_unref_lists(ctx);
}

/**
 * vmw_validation_preload_bo - Preload the validation memory allocator for a
 * call to vmw_validation_add_bo().
 * @ctx: Pointer to the validation context.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_bo() is guaranteed not to sleep. An error is not fatal
 * but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_bo(struct vmw_validation_context *ctx)
{
	unsigned int size = sizeof(struct vmw_validation_bo_node);

	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}

/**
 * vmw_validation_preload_res - Preload the validation memory allocator for a
 * call to vmw_validation_add_resource().
 * @ctx: Pointer to the validation context.
 * @size: Size of the validation node extra data. See below.
 *
 * Iff this function returns successfully, the next call to
 * vmw_validation_add_resource() with the same or smaller @size is
 * guaranteed not to sleep. An error is not fatal but voids the guarantee.
 *
 * Returns: Zero if successful, %-ENOMEM otherwise.
 */
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
			       unsigned int size)
{
	size = vmw_validation_align(sizeof(struct vmw_validation_res_node) +
				    size) +
	       vmw_validation_align(sizeof(struct vmw_validation_bo_node));
	if (!vmw_validation_mem_alloc(ctx, size))
		return -ENOMEM;

	ctx->mem_size_left += size;
	return 0;
}
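
/*
 * Usage sketch (illustrative): preloading pins down the allocation up front
 * so that the subsequent registration is guaranteed not to sleep, e.g. when
 * it must happen under a spinlock. "my_priv" is hypothetical:
 *
 *	ret = vmw_validation_preload_res(ctx, sizeof(struct my_priv));
 *	if (ret)
 *		return ret;
 *	spin_lock(&lock);
 *	ret = vmw_validation_add_resource(ctx, res, sizeof(struct my_priv),
 *					  0, &p, NULL);
 *	spin_unlock(&lock);
 */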

/**
 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
 * validation context
 * @ctx: The validation context
 *
 * This function unreserves the buffer objects previously reserved using
 * vmw_validation_bo_reserve(). It's typically used as part of an error path.
 */
void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
{
	struct vmw_validation_bo_node *entry;

	/*
	 * Switching coherent resource backup buffers failed.
	 * Release corresponding buffer object dirty trackers.
	 */
	list_for_each_entry(entry, &ctx->bo_list, base.head) {
		if (entry->coherent_count) {
			unsigned int coherent_count = entry->coherent_count;
			struct vmw_buffer_object *vbo =
				container_of(entry->base.bo, typeof(*vbo),
					     base);

			while (coherent_count--)
				vmw_bo_dirty_release(vbo);
		}
	}

	ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
}