xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR MIT
2*4882a593Smuzhiyun /**************************************************************************
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun  * copy of this software and associated documentation files (the
8*4882a593Smuzhiyun  * "Software"), to deal in the Software without restriction, including
9*4882a593Smuzhiyun  * without limitation the rights to use, copy, modify, merge, publish,
10*4882a593Smuzhiyun  * distribute, sub license, and/or sell copies of the Software, and to
11*4882a593Smuzhiyun  * permit persons to whom the Software is furnished to do so, subject to
12*4882a593Smuzhiyun  * the following conditions:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the
15*4882a593Smuzhiyun  * next paragraph) shall be included in all copies or substantial portions
16*4882a593Smuzhiyun  * of the Software.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*4882a593Smuzhiyun  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*4882a593Smuzhiyun  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*4882a593Smuzhiyun  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*4882a593Smuzhiyun  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  **************************************************************************/
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include <drm/ttm/ttm_bo_driver.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #include "vmwgfx_drv.h"
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define VMW_PPN_SIZE (sizeof(unsigned long))
33*4882a593Smuzhiyun /* A future safe maximum remap size. */
34*4882a593Smuzhiyun #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
35*4882a593Smuzhiyun #define DMA_ADDR_INVALID ((dma_addr_t) 0)
36*4882a593Smuzhiyun #define DMA_PAGE_INVALID 0UL
37*4882a593Smuzhiyun 
/*
 * vmw_gmr2_bind - Emit the FIFO command sequence that binds a page list
 * to a GMR2 (Guest Memory Region) id.
 *
 * @dev_priv:  Device private structure; used for FIFO reservation/commit.
 * @iter:      Page iterator already positioned on the first page to map.
 * @num_pages: Number of pages to bind to the region.
 * @gmr_id:    The GMR id to (re)define and fill.
 *
 * Emits one SVGA_CMD_DEFINE_GMR2 followed by as many SVGA_CMD_REMAP_GMR2
 * commands as needed, since a single remap command is limited to
 * VMW_PPN_PER_REMAP page numbers.
 *
 * Return: 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct vmw_piter *iter,
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	/* Each command body is preceded by one 32-bit command-id word. */
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	/* Number of remap commands: ceil(num_pages / VMW_PPN_PER_REMAP). */
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
	/* All page numbers plus (header + body) for every remap command. */
	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	/* Reserve the whole command sequence in one go. */
	cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);

	/*
	 * Need to split the command if there are too many
	 * pages that goes into the gmr.
	 */

	remap_cmd.gmrId = gmr_id;
	/* Page numbers are 64-bit when unsigned long is wider than 32 bits. */
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		/* Append the physical page numbers for this chunk. */
		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
			else
				*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
					PAGE_SHIFT;

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
			vmw_piter_next(iter);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

	/* The pointer arithmetic above must land exactly on the reserved end. */
	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
105*4882a593Smuzhiyun 
vmw_gmr2_unbind(struct vmw_private * dev_priv,int gmr_id)106*4882a593Smuzhiyun static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
107*4882a593Smuzhiyun 			    int gmr_id)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	SVGAFifoCmdDefineGMR2 define_cmd;
110*4882a593Smuzhiyun 	uint32_t define_size = sizeof(define_cmd) + 4;
111*4882a593Smuzhiyun 	uint32_t *cmd;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
114*4882a593Smuzhiyun 	if (unlikely(cmd == NULL))
115*4882a593Smuzhiyun 		return;
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	define_cmd.gmrId = gmr_id;
118*4882a593Smuzhiyun 	define_cmd.numPages = 0;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	*cmd++ = SVGA_CMD_DEFINE_GMR2;
121*4882a593Smuzhiyun 	memcpy(cmd, &define_cmd, sizeof(define_cmd));
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	vmw_fifo_commit(dev_priv, define_size);
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun 
vmw_gmr_bind(struct vmw_private * dev_priv,const struct vmw_sg_table * vsgt,unsigned long num_pages,int gmr_id)127*4882a593Smuzhiyun int vmw_gmr_bind(struct vmw_private *dev_priv,
128*4882a593Smuzhiyun 		 const struct vmw_sg_table *vsgt,
129*4882a593Smuzhiyun 		 unsigned long num_pages,
130*4882a593Smuzhiyun 		 int gmr_id)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun 	struct vmw_piter data_iter;
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	vmw_piter_start(&data_iter, vsgt, 0);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	if (unlikely(!vmw_piter_next(&data_iter)))
137*4882a593Smuzhiyun 		return 0;
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
140*4882a593Smuzhiyun 		return -EINVAL;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 
vmw_gmr_unbind(struct vmw_private * dev_priv,int gmr_id)146*4882a593Smuzhiyun void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
149*4882a593Smuzhiyun 		vmw_gmr2_unbind(dev_priv, gmr_id);
150*4882a593Smuzhiyun }
151