xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nouveau_vmm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_vmm.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"
#include "nouveau_mem.h"

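/* Unmap @vma from the GPU virtual address space if it currently has
 * backing memory mapped, and clear the association.
 */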
void
nouveau_vma_unmap(struct nouveau_vma *vma)
{
	if (vma->mem) {
		nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
		vma->mem = NULL;
	}
}

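/* Map @mem into the GPU virtual address space at @vma's address and,
 * on success, record it as the VMA's backing memory.
 */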
int
nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
{
	struct nvif_vma tmp = { .addr = vma->addr };
	int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
	if (ret)
		return ret;
	vma->mem = mem;
	return 0;
}

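/* Return the VMA that maps @nvbo into @vmm, or NULL if the buffer has
 * no mapping in that address space.
 */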
struct nouveau_vma *
nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
{
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vmm == vmm)
			return vma;
	}

	return NULL;
}

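/* Drop a reference on *pvma.  When the last reference is gone, release
 * the address-space allocation (if one was ever made), unlink the VMA
 * from its buffer object and free it.  *pvma is cleared in all cases.
 */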
void
nouveau_vma_del(struct nouveau_vma **pvma)
{
	struct nouveau_vma *vma = *pvma;
	if (vma && --vma->refs <= 0) {
		if (likely(vma->addr != ~0ULL)) {
			struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
			nvif_vmm_put(&vma->vmm->vmm, &tmp);
		}
		list_del(&vma->head);
		kfree(*pvma);
	}
	*pvma = NULL;
}

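/* Look up or create a VMA for @nvbo in @vmm.  An existing VMA simply
 * gains a reference.  Otherwise a new one is allocated: buffers not in
 * system memory whose page size matches the BO get address space and
 * are mapped immediately; for anything else only the address space is
 * reserved and the mapping is deferred.
 */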
int
nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
		struct nouveau_vma **pvma)
{
	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
	struct nouveau_vma *vma;
	struct nvif_vma tmp;
	int ret;

	if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
		vma->refs++;
		return 0;
	}

	if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
		return -ENOMEM;
	vma->vmm = vmm;
	vma->refs = 1;
	vma->addr = ~0ULL;
	vma->mem = NULL;
	vma->fence = NULL;
	list_add_tail(&vma->head, &nvbo->vma_list);

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    mem->mem.page == nvbo->page) {
		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
				   mem->mem.size, &tmp);
		if (ret)
			goto done;

		vma->addr = tmp.addr;
		ret = nouveau_vma_map(vma, mem);
	} else {
		ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
				   mem->mem.size, &tmp);
		vma->addr = tmp.addr;
	}

done:
	if (ret)
		nouveau_vma_del(pvma);
	return ret;
}

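/* Tear down a client's VMM: shut down its SVM state, destroy the nvif
 * VMM object and drop the back-pointer to the client.
 */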
void
nouveau_vmm_fini(struct nouveau_vmm *vmm)
{
	nouveau_svmm_fini(&vmm->svmm);
	nvif_vmm_dtor(&vmm->vmm);
	vmm->cli = NULL;
}

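/* Construct the nvif VMM object of class @oclass for @cli and, on
 * success, record the owning client.
 */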
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
	int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
				0, NULL, 0, &vmm->vmm);
	if (ret)
		return ret;

	vmm->cli = cli;
	return 0;
}