xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nv17_fence.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"

#include <nvif/push006c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>

#include <nvhw/class/cl176e.h>

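/*
 * Inter-channel synchronisation on NV17/NV4x using a DMA semaphore in the
 * shared fence buffer.  The previous channel is asked to acquire the current
 * sequence value and release value + 1 once its queued work has drained; the
 * new channel then acquires value + 1 before releasing value + 2.  Advancing
 * priv->sequence by two per sync leaves the semaphore holding exactly the
 * value the next sync will acquire first.  Returns -EBUSY if the client
 * mutex cannot be taken without blocking.
 */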
int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)prev->user.client;
	struct nv10_fence_priv *priv = chan->drm->fence;
	struct nv10_fence_chan *fctx = chan->fence;
	struct nvif_push *ppush = prev->chan.push;
	struct nvif_push *npush = chan->chan.push;
	u32 value;
	int ret;

	if (!mutex_trylock(&cli->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = PUSH_WAIT(ppush, 5);
	if (!ret) {
		PUSH_MTHD(ppush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
					 SEMAPHORE_OFFSET, 0,
					 SEMAPHORE_ACQUIRE, value + 0,
					 SEMAPHORE_RELEASE, value + 1);
		PUSH_KICK(ppush);
	}

	if (!ret && !(ret = PUSH_WAIT(npush, 5))) {
		PUSH_MTHD(npush, NV176E, SET_CONTEXT_DMA_SEMAPHORE, fctx->sema.handle,
					 SEMAPHORE_OFFSET, 0,
					 SEMAPHORE_ACQUIRE, value + 1,
					 SEMAPHORE_RELEASE, value + 2);
		PUSH_KICK(npush);
	}

	mutex_unlock(&cli->mutex);
	return 0;
}

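/*
 * Per-channel fence setup: allocate the shared nv10 fence context, point the
 * sync hook at the NV17 implementation above, and construct a DMA object
 * (NvSema) spanning the fence buffer so the channel's semaphore methods can
 * reference it.  Any failure tears the context back down through
 * nv10_fence_context_del().
 */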
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
	struct nv10_fence_priv *priv = chan->drm->fence;
	struct nv10_fence_chan *fctx;
	struct ttm_resource *reg = &priv->bo->bo.mem;
	u32 start = reg->start * PAGE_SIZE;
	u32 limit = start + reg->size - 1;
	int ret = 0;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(chan, &fctx->base);
	fctx->base.emit = nv10_fence_emit;
	fctx->base.read = nv10_fence_read;
	fctx->base.sync = nv17_fence_sync;

	ret = nvif_object_ctor(&chan->user, "fenceCtxDma", NvSema,
			       NV_DMA_FROM_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = start,
					.limit = limit,
			       }, sizeof(struct nv_dma_v0),
			       &fctx->sema);
	if (ret)
		nv10_fence_context_del(chan);
	return ret;
}

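/*
 * On resume the semaphore word in VRAM can no longer be trusted (the buffer
 * contents are not preserved across suspend), so restore it from the
 * software sequence counter to keep future syncs consistent.
 */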
void
nv17_fence_resume(struct nouveau_drm *drm)
{
	struct nv10_fence_priv *priv = drm->fence;

	nouveau_bo_wr32(priv->bo, 0, priv->sequence);
}

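/*
 * Driver-wide fence setup: reuse the nv10 destroy/context_del hooks,
 * override resume and context_new with the NV17 variants, and allocate, pin
 * and map one page of VRAM to back the semaphore.  Word 0 is zeroed so the
 * first sync's acquire of sequence 0 completes immediately.
 */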
int
nv17_fence_create(struct nouveau_drm *drm)
{
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv10_fence_destroy;
	priv->base.resume = nv17_fence_resume;
	priv->base.context_new = nv17_fence_context_new;
	priv->base.context_del = nv10_fence_context_del;
	spin_lock_init(&priv->lock);

	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
			     NOUVEAU_GEM_DOMAIN_VRAM,
			     0, 0x0000, NULL, NULL, &priv->bo);
	if (!ret) {
		ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
		if (!ret) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret) {
		nv10_fence_destroy(drm);
		return ret;
	}

	nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
	return ret;
}