// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = TTM_PL_FLAG_CACHED
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = TTM_PL_FLAG_CACHED
	}
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt from which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
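
/*
 * Illustrative only: a minimal sketch of how the vmw_piter helpers above are
 * meant to be used, mirroring the region-counting loop in vmw_ttm_map_dma()
 * further down. The vsgt pointer is assumed to come from vmw_bo_sg_table()
 * or from an already mapped struct vmw_ttm_tt.
 *
 *	struct vmw_piter iter;
 *	dma_addr_t addr;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		addr = vmw_piter_dma_addr(&iter);
 *		(use addr for one page; note that the iterator only points to
 *		 a valid page after the first vmw_piter_next() call.)
 *	}
 */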

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->dev->dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
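
/*
 * Illustrative only: a hypothetical caller that walks the device addresses
 * of an already reserved (or pinned) buffer object. The helper name
 * example_count_pages() is made up for this sketch; only vmw_bo_sg_table(),
 * vmw_piter_start(), vmw_piter_next() and vmw_piter_dma_addr() are real.
 *
 *	static unsigned long example_count_pages(struct ttm_buffer_object *bo)
 *	{
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *		struct vmw_piter iter;
 *		unsigned long count = 0;
 *
 *		for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *			count++;
 *
 *		return count;
 *	}
 */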


static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}

static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}


static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Unused.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_bind = &vmw_ttm_bind,
	.ttm_tt_unbind = &vmw_ttm_unbind,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &bo);

	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;
	return ret;
}
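
/*
 * Illustrative only: a sketch of how a caller might use
 * vmw_bo_create_and_populate() to obtain a populated, non-evictable
 * system-memory buffer object and release it again. The use of ttm_bo_put()
 * for the final unreference is an assumption, not taken from this file.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE, &bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	(use vmw_bo_sg_table(bo) to program the device, then drop the
 *	 reference when done:)
 *
 *	ttm_bo_put(bo);
 */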