xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
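
/*
 * Worked example of the arithmetic above (illustrative only): if
 * sizeof(SVGACBHeader) is 64 bytes, then ALIGN(64, 64) == 64 and
 * VMW_CMDBUF_INLINE_SIZE == 960, so a header plus its inline space is
 * exactly 1024 bytes and four struct vmw_cmdbuf_dheader allocations fit
 * in a 4 KiB page. The value 64 is an assumption for the example; the
 * real size comes from the SVGA device headers.
 */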

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new command buffers to
 * hardware for this context, for example during error recovery.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))
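
/*
 * Minimal usage sketch for for_each_cmdbuf_ctx() (illustrative, assuming a
 * valid manager pointer 'man'); the error-handling work function below
 * walks the enabled contexts with this same pattern:
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	int i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */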

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}


/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_del(&entry->list);
		list_add_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Counter that is incremented if the context still has command
 * buffers queued for submission when processing is done.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically freeing them, but on preemption or error taking
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */

		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_fifo_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Also check the preempted queue for pending command
 * buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
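
/*
 * Usage sketch for vmw_cmdbuf_idle() (illustrative, assuming a valid
 * manager pointer 'man'): wait interruptibly for up to one second for all
 * queued command buffers to drain, treating a timeout as an error:
 *
 *	int ret = vmw_cmdbuf_idle(man, true, HZ);
 *
 *	if (ret == -EBUSY)
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */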

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 * On failure, command buffer processing is run once to reclaim space before
 * the allocation is retried.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if no space
 * is currently available, it turns on IRQ handling and sleeps waiting for it
 * to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time may wait for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
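
/*
 * Allocation sketch (illustrative; 'man', 'cmd_blob' and 'cmd_size' are
 * assumed caller-provided). vmw_cmdbuf_send_device_command() below follows
 * this same alloc/fill/free pattern for synchronous submission:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	memcpy(cmd, cmd_blob, cmd_size);
 *	...
 *	vmw_cmdbuf_header_free(header);
 */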

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun /**
1093*4882a593Smuzhiyun  * vmw_cmdbuf_commit - Commit commands in a command buffer.
1094*4882a593Smuzhiyun  *
1095*4882a593Smuzhiyun  * @man: The command buffer manager.
1096*4882a593Smuzhiyun  * @size: The size of the commands actually written.
1097*4882a593Smuzhiyun  * @header: Header of the command buffer. NULL if the current command buffer
1098*4882a593Smuzhiyun  * should be used.
1099*4882a593Smuzhiyun  * @flush: Whether to flush the command buffer immediately.
1100*4882a593Smuzhiyun  */
1101*4882a593Smuzhiyun void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1102*4882a593Smuzhiyun 		       struct vmw_cmdbuf_header *header, bool flush)
1103*4882a593Smuzhiyun {
1104*4882a593Smuzhiyun 	if (!header) {
1105*4882a593Smuzhiyun 		vmw_cmdbuf_commit_cur(man, size, flush);
1106*4882a593Smuzhiyun 		return;
1107*4882a593Smuzhiyun 	}
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	(void) vmw_cmdbuf_cur_lock(man, false);
1110*4882a593Smuzhiyun 	__vmw_cmdbuf_cur_flush(man);
1111*4882a593Smuzhiyun 	WARN_ON(size > header->reserved);
1112*4882a593Smuzhiyun 	man->cur = header;
1113*4882a593Smuzhiyun 	man->cur_pos = size;
1114*4882a593Smuzhiyun 	if (!size)
1115*4882a593Smuzhiyun 		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1116*4882a593Smuzhiyun 	if (flush)
1117*4882a593Smuzhiyun 		__vmw_cmdbuf_cur_flush(man);
1118*4882a593Smuzhiyun 	vmw_cmdbuf_cur_unlock(man);
1119*4882a593Smuzhiyun }
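
/*
 * Editorial sketch, not part of the driver: a minimal example of the
 * vmw_cmdbuf_reserve() / vmw_cmdbuf_commit() pair on the current command
 * buffer (header == NULL). The command layout and the zero command id are
 * hypothetical placeholders; real callers reserve space for an SVGA
 * command id followed by its body.
 */
static int vmw_cmdbuf_example_submit(struct vmw_cmdbuf_man *man)
{
	struct {
		uint32 id;
		uint32 body;	/* Hypothetical one-word command body. */
	} __packed *cmd;

	/* Reserve space in the current buffer; may sleep waiting for space. */
	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
				 true, NULL);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	cmd->id = 0;		/* Hypothetical command id. */
	cmd->body = 0;

	/* Commit the bytes actually written; no immediate flush. */
	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);

	return 0;
}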
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun /**
1123*4882a593Smuzhiyun  * vmw_cmdbuf_send_device_command - Send a command through the device context.
1124*4882a593Smuzhiyun  *
1125*4882a593Smuzhiyun  * @man: The command buffer manager.
1126*4882a593Smuzhiyun  * @command: Pointer to the command to send.
1127*4882a593Smuzhiyun  * @size: Size of the command.
1128*4882a593Smuzhiyun  *
1129*4882a593Smuzhiyun  * Synchronously sends a device context command.
1130*4882a593Smuzhiyun  */
1131*4882a593Smuzhiyun static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1132*4882a593Smuzhiyun 					  const void *command,
1133*4882a593Smuzhiyun 					  size_t size)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	struct vmw_cmdbuf_header *header;
1136*4882a593Smuzhiyun 	int status;
1137*4882a593Smuzhiyun 	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	if (IS_ERR(cmd))
1140*4882a593Smuzhiyun 		return PTR_ERR(cmd);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	memcpy(cmd, command, size);
1143*4882a593Smuzhiyun 	header->cb_header->length = size;
1144*4882a593Smuzhiyun 	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1145*4882a593Smuzhiyun 	spin_lock(&man->lock);
1146*4882a593Smuzhiyun 	status = vmw_cmdbuf_header_submit(header);
1147*4882a593Smuzhiyun 	spin_unlock(&man->lock);
1148*4882a593Smuzhiyun 	vmw_cmdbuf_header_free(header);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	if (status != SVGA_CB_STATUS_COMPLETED) {
1151*4882a593Smuzhiyun 		DRM_ERROR("Device context command failed with status %d\n",
1152*4882a593Smuzhiyun 			  status);
1153*4882a593Smuzhiyun 		return -EINVAL;
1154*4882a593Smuzhiyun 	}
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	return 0;
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun /**
1160*4882a593Smuzhiyun  * vmw_cmdbuf_preempt - Send a preempt command through the device
1161*4882a593Smuzhiyun  * context.
1162*4882a593Smuzhiyun  *
1163*4882a593Smuzhiyun  * @man: The command buffer manager.
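1163*4882a593Smuzhiyun  * @context: Device context to preempt.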
1164*4882a593Smuzhiyun  *
1165*4882a593Smuzhiyun  * Synchronously sends a preempt command.
1166*4882a593Smuzhiyun  */
1167*4882a593Smuzhiyun static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1168*4882a593Smuzhiyun {
1169*4882a593Smuzhiyun 	struct {
1170*4882a593Smuzhiyun 		uint32 id;
1171*4882a593Smuzhiyun 		SVGADCCmdPreempt body;
1172*4882a593Smuzhiyun 	} __packed cmd;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	cmd.id = SVGA_DC_CMD_PREEMPT;
1175*4882a593Smuzhiyun 	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1176*4882a593Smuzhiyun 	cmd.body.ignoreIDZero = 0;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun /**
1183*4882a593Smuzhiyun  * vmw_cmdbuf_startstop - Send a start / stop command through the device
1184*4882a593Smuzhiyun  * context.
1185*4882a593Smuzhiyun  *
1186*4882a593Smuzhiyun  * @man: The command buffer manager.
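1186*4882a593Smuzhiyun  * @context: Device context to start / stop.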
1187*4882a593Smuzhiyun  * @enable: Whether to enable or disable the context.
1188*4882a593Smuzhiyun  *
1189*4882a593Smuzhiyun  * Synchronously sends a device start / stop context command.
1190*4882a593Smuzhiyun  */
1191*4882a593Smuzhiyun static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1192*4882a593Smuzhiyun 				bool enable)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	struct {
1195*4882a593Smuzhiyun 		uint32 id;
1196*4882a593Smuzhiyun 		SVGADCCmdStartStop body;
1197*4882a593Smuzhiyun 	} __packed cmd;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1200*4882a593Smuzhiyun 	cmd.body.enable = (enable) ? 1 : 0;
1201*4882a593Smuzhiyun 	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun /**
1207*4882a593Smuzhiyun  * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1208*4882a593Smuzhiyun  *
1209*4882a593Smuzhiyun  * @man: The command buffer manager.
1210*4882a593Smuzhiyun  * @size: The size of the main space pool.
1211*4882a593Smuzhiyun  * @default_size: The default size of the command buffer for small kernel
1212*4882a593Smuzhiyun  * submissions.
1213*4882a593Smuzhiyun  *
1214*4882a593Smuzhiyun  * Set the size and allocate the main command buffer space pool,
1215*4882a593Smuzhiyun  * as well as the default size of the command buffer for
1216*4882a593Smuzhiyun  * small kernel submissions. If successful, this enables large command
1217*4882a593Smuzhiyun  * submissions. Note that this function requires that rudimentary command
1218*4882a593Smuzhiyun  * submission is already available and that the MOB memory manager is alive.
1219*4882a593Smuzhiyun  * Returns 0 on success. Negative error code on failure.
1220*4882a593Smuzhiyun  */
1221*4882a593Smuzhiyun int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1222*4882a593Smuzhiyun 			     size_t size, size_t default_size)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun 	struct vmw_private *dev_priv = man->dev_priv;
1225*4882a593Smuzhiyun 	bool dummy;
1226*4882a593Smuzhiyun 	int ret;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	if (man->has_pool)
1229*4882a593Smuzhiyun 		return -EINVAL;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	/* First, try to allocate a huge chunk of DMA memory */
1232*4882a593Smuzhiyun 	size = PAGE_ALIGN(size);
1233*4882a593Smuzhiyun 	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1234*4882a593Smuzhiyun 				      &man->handle, GFP_KERNEL);
1235*4882a593Smuzhiyun 	if (man->map) {
1236*4882a593Smuzhiyun 		man->using_mob = false;
1237*4882a593Smuzhiyun 	} else {
1238*4882a593Smuzhiyun 		/*
1239*4882a593Smuzhiyun 		 * DMA memory failed. If we can have command buffers in a
1240*4882a593Smuzhiyun 		 * MOB, try to use that instead. Note that this will
1241*4882a593Smuzhiyun 		 * actually call into the already enabled manager, when
1242*4882a593Smuzhiyun 		 * binding the MOB.
1243*4882a593Smuzhiyun 		 */
1244*4882a593Smuzhiyun 		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
1245*4882a593Smuzhiyun 		    !dev_priv->has_mob)
1246*4882a593Smuzhiyun 			return -ENOMEM;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1249*4882a593Smuzhiyun 				    &vmw_mob_ne_placement, 0, false,
1250*4882a593Smuzhiyun 				    &man->cmd_space);
1251*4882a593Smuzhiyun 		if (ret)
1252*4882a593Smuzhiyun 			return ret;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 		man->using_mob = true;
1255*4882a593Smuzhiyun 		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1256*4882a593Smuzhiyun 				  &man->map_obj);
1257*4882a593Smuzhiyun 		if (ret)
1258*4882a593Smuzhiyun 			goto out_no_map;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	man->size = size;
1264*4882a593Smuzhiyun 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	man->has_pool = true;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	/*
1269*4882a593Smuzhiyun 	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1270*4882a593Smuzhiyun 	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1271*4882a593Smuzhiyun 	 * needs to wait for space and we block on further command
1272*4882a593Smuzhiyun 	 * submissions to be able to free up space.
1273*4882a593Smuzhiyun 	 */
1274*4882a593Smuzhiyun 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1275*4882a593Smuzhiyun 	DRM_INFO("Using command buffers with %s pool.\n",
1276*4882a593Smuzhiyun 		 (man->using_mob) ? "MOB" : "DMA");
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	return 0;
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun out_no_map:
1281*4882a593Smuzhiyun 	if (man->using_mob) {
1282*4882a593Smuzhiyun 		ttm_bo_put(man->cmd_space);
1283*4882a593Smuzhiyun 		man->cmd_space = NULL;
1284*4882a593Smuzhiyun 	}
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	return ret;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun /**
1290*4882a593Smuzhiyun  * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1291*4882a593Smuzhiyun  * inline command buffer submissions only.
1292*4882a593Smuzhiyun  *
1293*4882a593Smuzhiyun  * @dev_priv: Pointer to device private structure.
1294*4882a593Smuzhiyun  *
1295*4882a593Smuzhiyun  * Returns a pointer to a command buffer manager on success or an error pointer
1296*4882a593Smuzhiyun  * on failure. The command buffer manager will be enabled for submissions of
1297*4882a593Smuzhiyun  * size VMW_CMDBUF_INLINE_SIZE only.
1298*4882a593Smuzhiyun  */
1299*4882a593Smuzhiyun struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct vmw_cmdbuf_man *man;
1302*4882a593Smuzhiyun 	struct vmw_cmdbuf_context *ctx;
1303*4882a593Smuzhiyun 	unsigned int i;
1304*4882a593Smuzhiyun 	int ret;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1307*4882a593Smuzhiyun 		return ERR_PTR(-ENOSYS);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	man = kzalloc(sizeof(*man), GFP_KERNEL);
1310*4882a593Smuzhiyun 	if (!man)
1311*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1314*4882a593Smuzhiyun 		2 : 1;
1315*4882a593Smuzhiyun 	man->headers = dma_pool_create("vmwgfx cmdbuf",
1316*4882a593Smuzhiyun 				       &dev_priv->dev->pdev->dev,
1317*4882a593Smuzhiyun 				       sizeof(SVGACBHeader),
1318*4882a593Smuzhiyun 				       64, PAGE_SIZE);
1319*4882a593Smuzhiyun 	if (!man->headers) {
1320*4882a593Smuzhiyun 		ret = -ENOMEM;
1321*4882a593Smuzhiyun 		goto out_no_pool;
1322*4882a593Smuzhiyun 	}
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1325*4882a593Smuzhiyun 					&dev_priv->dev->pdev->dev,
1326*4882a593Smuzhiyun 					sizeof(struct vmw_cmdbuf_dheader),
1327*4882a593Smuzhiyun 					64, PAGE_SIZE);
1328*4882a593Smuzhiyun 	if (!man->dheaders) {
1329*4882a593Smuzhiyun 		ret = -ENOMEM;
1330*4882a593Smuzhiyun 		goto out_no_dpool;
1331*4882a593Smuzhiyun 	}
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	for_each_cmdbuf_ctx(man, i, ctx)
1334*4882a593Smuzhiyun 		vmw_cmdbuf_ctx_init(ctx);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	INIT_LIST_HEAD(&man->error);
1337*4882a593Smuzhiyun 	spin_lock_init(&man->lock);
1338*4882a593Smuzhiyun 	mutex_init(&man->cur_mutex);
1339*4882a593Smuzhiyun 	mutex_init(&man->space_mutex);
1340*4882a593Smuzhiyun 	mutex_init(&man->error_mutex);
1341*4882a593Smuzhiyun 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1342*4882a593Smuzhiyun 	init_waitqueue_head(&man->alloc_queue);
1343*4882a593Smuzhiyun 	init_waitqueue_head(&man->idle_queue);
1344*4882a593Smuzhiyun 	man->dev_priv = dev_priv;
1345*4882a593Smuzhiyun 	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1346*4882a593Smuzhiyun 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1347*4882a593Smuzhiyun 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1348*4882a593Smuzhiyun 			       &dev_priv->error_waiters);
1349*4882a593Smuzhiyun 	ret = vmw_cmdbuf_startstop(man, 0, true);
1350*4882a593Smuzhiyun 	if (ret) {
1351*4882a593Smuzhiyun 		DRM_ERROR("Failed starting command buffer contexts\n");
1352*4882a593Smuzhiyun 		vmw_cmdbuf_man_destroy(man);
1353*4882a593Smuzhiyun 		return ERR_PTR(ret);
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	return man;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun out_no_dpool:
1359*4882a593Smuzhiyun 	dma_pool_destroy(man->headers);
1360*4882a593Smuzhiyun out_no_pool:
1361*4882a593Smuzhiyun 	kfree(man);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	return ERR_PTR(ret);
1364*4882a593Smuzhiyun }
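
/*
 * Editorial sketch, not part of the driver: the bring-up order implied by
 * the comments above. vmw_cmdbuf_man_create() enables inline submissions
 * only; once the MOB memory manager is alive, vmw_cmdbuf_set_pool_size()
 * enables large submissions. The 2 MiB pool size is an illustrative
 * assumption, and in this revision the default_size argument is overridden
 * internally to VMW_CMDBUF_INLINE_SIZE anyway.
 */
static int vmw_cmdbuf_example_init(struct vmw_private *dev_priv,
				   struct vmw_cmdbuf_man **out)
{
	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
	int ret;

	if (IS_ERR(man))
		return PTR_ERR(man);

	/* Requires rudimentary command submission and a live MOB manager. */
	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024,
				       VMW_CMDBUF_INLINE_SIZE);
	if (ret) {
		vmw_cmdbuf_man_destroy(man);
		return ret;
	}

	*out = man;
	return 0;
}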
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun /**
1367*4882a593Smuzhiyun  * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1368*4882a593Smuzhiyun  *
1369*4882a593Smuzhiyun  * @man: Pointer to a command buffer manager.
1370*4882a593Smuzhiyun  *
1371*4882a593Smuzhiyun  * This function removes the main buffer space pool, and should be called
1372*4882a593Smuzhiyun  * before MOB memory management is removed. When this function has been called,
1373*4882a593Smuzhiyun  * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1374*4882a593Smuzhiyun  * less are allowed, and the default size of the command buffer for small kernel
1375*4882a593Smuzhiyun  * submissions is also set to this size.
1376*4882a593Smuzhiyun  */
1377*4882a593Smuzhiyun void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1378*4882a593Smuzhiyun {
1379*4882a593Smuzhiyun 	if (!man->has_pool)
1380*4882a593Smuzhiyun 		return;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	man->has_pool = false;
1383*4882a593Smuzhiyun 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1384*4882a593Smuzhiyun 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1385*4882a593Smuzhiyun 	if (man->using_mob) {
1386*4882a593Smuzhiyun 		(void) ttm_bo_kunmap(&man->map_obj);
1387*4882a593Smuzhiyun 		ttm_bo_put(man->cmd_space);
1388*4882a593Smuzhiyun 		man->cmd_space = NULL;
1389*4882a593Smuzhiyun 	} else {
1390*4882a593Smuzhiyun 		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1391*4882a593Smuzhiyun 				  man->size, man->map, man->handle);
1392*4882a593Smuzhiyun 	}
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun /**
1396*4882a593Smuzhiyun  * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1397*4882a593Smuzhiyun  *
1398*4882a593Smuzhiyun  * @man: Pointer to a command buffer manager.
1399*4882a593Smuzhiyun  *
1400*4882a593Smuzhiyun  * This function idles and then destroys a command buffer manager.
1401*4882a593Smuzhiyun  */
1402*4882a593Smuzhiyun void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun 	WARN_ON_ONCE(man->has_pool);
1405*4882a593Smuzhiyun 	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (vmw_cmdbuf_startstop(man, 0, false))
1408*4882a593Smuzhiyun 		DRM_ERROR("Failed stopping command buffer contexts.\n");
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1411*4882a593Smuzhiyun 				  &man->dev_priv->error_waiters);
1412*4882a593Smuzhiyun 	(void) cancel_work_sync(&man->work);
1413*4882a593Smuzhiyun 	dma_pool_destroy(man->dheaders);
1414*4882a593Smuzhiyun 	dma_pool_destroy(man->headers);
1415*4882a593Smuzhiyun 	mutex_destroy(&man->cur_mutex);
1416*4882a593Smuzhiyun 	mutex_destroy(&man->space_mutex);
1417*4882a593Smuzhiyun 	mutex_destroy(&man->error_mutex);
1418*4882a593Smuzhiyun 	kfree(man);
1419*4882a593Smuzhiyun }
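
/*
 * Editorial sketch, not part of the driver: teardown mirrors bring-up.
 * vmw_cmdbuf_remove_pool() must run while MOB memory management is still
 * alive, and vmw_cmdbuf_man_destroy() expects the pool to be gone already
 * (see the WARN_ON_ONCE() above).
 */
static void vmw_cmdbuf_example_fini(struct vmw_cmdbuf_man *man)
{
	/* Drop the main space pool; only inline submissions remain legal. */
	vmw_cmdbuf_remove_pool(man);

	/* ... MOB memory management would be taken down here ... */

	/* Idle, stop the device contexts and free the manager. */
	vmw_cmdbuf_man_destroy(man);
}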
1420