xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nvkm/falcon/base.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun  * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun  * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun  * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun  * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * The above copyright notice and this permission notice shall be included in
12*4882a593Smuzhiyun  * all copies or substantial portions of the Software.
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17*4882a593Smuzhiyun  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18*4882a593Smuzhiyun  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19*4882a593Smuzhiyun  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20*4882a593Smuzhiyun  * DEALINGS IN THE SOFTWARE.
21*4882a593Smuzhiyun  */
22*4882a593Smuzhiyun #include "priv.h"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include <subdev/mc.h>
25*4882a593Smuzhiyun #include <subdev/top.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun void
nvkm_falcon_load_imem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u16 tag,u8 port,bool secure)28*4882a593Smuzhiyun nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
29*4882a593Smuzhiyun 		      u32 size, u16 tag, u8 port, bool secure)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun 	if (secure && !falcon->secret) {
32*4882a593Smuzhiyun 		nvkm_warn(falcon->user,
33*4882a593Smuzhiyun 			  "writing with secure tag on a non-secure falcon!\n");
34*4882a593Smuzhiyun 		return;
35*4882a593Smuzhiyun 	}
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	falcon->func->load_imem(falcon, data, start, size, tag, port,
38*4882a593Smuzhiyun 				secure);
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun void
nvkm_falcon_load_dmem(struct nvkm_falcon * falcon,void * data,u32 start,u32 size,u8 port)42*4882a593Smuzhiyun nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
43*4882a593Smuzhiyun 		      u32 size, u8 port)
44*4882a593Smuzhiyun {
45*4882a593Smuzhiyun 	mutex_lock(&falcon->dmem_mutex);
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	falcon->func->load_dmem(falcon, data, start, size, port);
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	mutex_unlock(&falcon->dmem_mutex);
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun void
nvkm_falcon_read_dmem(struct nvkm_falcon * falcon,u32 start,u32 size,u8 port,void * data)53*4882a593Smuzhiyun nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
54*4882a593Smuzhiyun 		      void *data)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	mutex_lock(&falcon->dmem_mutex);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	falcon->func->read_dmem(falcon, start, size, port, data);
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	mutex_unlock(&falcon->dmem_mutex);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun void
nvkm_falcon_bind_context(struct nvkm_falcon * falcon,struct nvkm_memory * inst)64*4882a593Smuzhiyun nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	if (!falcon->func->bind_context) {
67*4882a593Smuzhiyun 		nvkm_error(falcon->user,
68*4882a593Smuzhiyun 			   "Context binding not supported on this falcon!\n");
69*4882a593Smuzhiyun 		return;
70*4882a593Smuzhiyun 	}
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	falcon->func->bind_context(falcon, inst);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun void
nvkm_falcon_set_start_addr(struct nvkm_falcon * falcon,u32 start_addr)76*4882a593Smuzhiyun nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	falcon->func->set_start_addr(falcon, start_addr);
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun void
nvkm_falcon_start(struct nvkm_falcon * falcon)82*4882a593Smuzhiyun nvkm_falcon_start(struct nvkm_falcon *falcon)
83*4882a593Smuzhiyun {
84*4882a593Smuzhiyun 	falcon->func->start(falcon);
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun int
nvkm_falcon_enable(struct nvkm_falcon * falcon)88*4882a593Smuzhiyun nvkm_falcon_enable(struct nvkm_falcon *falcon)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	struct nvkm_device *device = falcon->owner->device;
91*4882a593Smuzhiyun 	enum nvkm_devidx id = falcon->owner->index;
92*4882a593Smuzhiyun 	int ret;
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	nvkm_mc_enable(device, id);
95*4882a593Smuzhiyun 	ret = falcon->func->enable(falcon);
96*4882a593Smuzhiyun 	if (ret) {
97*4882a593Smuzhiyun 		nvkm_mc_disable(device, id);
98*4882a593Smuzhiyun 		return ret;
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	return 0;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun void
nvkm_falcon_disable(struct nvkm_falcon * falcon)105*4882a593Smuzhiyun nvkm_falcon_disable(struct nvkm_falcon *falcon)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun 	struct nvkm_device *device = falcon->owner->device;
108*4882a593Smuzhiyun 	enum nvkm_devidx id = falcon->owner->index;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	/* already disabled, return or wait_idle will timeout */
111*4882a593Smuzhiyun 	if (!nvkm_mc_enabled(device, id))
112*4882a593Smuzhiyun 		return;
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	falcon->func->disable(falcon);
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	nvkm_mc_disable(device, id);
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun int
nvkm_falcon_reset(struct nvkm_falcon * falcon)120*4882a593Smuzhiyun nvkm_falcon_reset(struct nvkm_falcon *falcon)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun 	if (!falcon->func->reset) {
123*4882a593Smuzhiyun 		nvkm_falcon_disable(falcon);
124*4882a593Smuzhiyun 		return nvkm_falcon_enable(falcon);
125*4882a593Smuzhiyun 	}
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	return falcon->func->reset(falcon);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun int
nvkm_falcon_wait_for_halt(struct nvkm_falcon * falcon,u32 ms)131*4882a593Smuzhiyun nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	return falcon->func->wait_for_halt(falcon, ms);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun int
nvkm_falcon_clear_interrupt(struct nvkm_falcon * falcon,u32 mask)137*4882a593Smuzhiyun nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun 	return falcon->func->clear_interrupt(falcon, mask);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
/* One-time probe of the falcon's hardware configuration, performed on
 * first acquisition (see nvkm_falcon_get()).  Fills in the falcon's
 * base address, version, secure-boot level, port counts and memory
 * limits from its config registers.
 *
 * Returns 0 on success, -ENODEV if no base address can be determined.
 */
static int
nvkm_falcon_oneinit(struct nvkm_falcon *falcon)
{
	const struct nvkm_falcon_func *func = falcon->func;
	const struct nvkm_subdev *subdev = falcon->owner;
	u32 reg;

	/* No base address supplied at construction time: look it up from
	 * the device's topology tables instead. */
	if (!falcon->addr) {
		falcon->addr = nvkm_top_addr(subdev->device, subdev->index);
		if (WARN_ON(!falcon->addr))
			return -ENODEV;
	}

	/* Config register at +0x12c (presumably FALCON_HWCFG1 — confirm
	 * against hw docs): bits 3:0 core version, 5:4 secure-boot level,
	 * 11:8 IMEM port count, 15:12 DMEM port count. */
	reg = nvkm_falcon_rd32(falcon, 0x12c);
	falcon->version = reg & 0xf;
	falcon->secret = (reg >> 4) & 0x3;
	falcon->code.ports = (reg >> 8) & 0xf;
	falcon->data.ports = (reg >> 12) & 0xf;

	/* Config register at +0x108 (presumably FALCON_HWCFG): IMEM size
	 * in 256-byte units (bits 8:0), DMEM size scaled from bits 17:9. */
	reg = nvkm_falcon_rd32(falcon, 0x108);
	falcon->code.limit = (reg & 0x1ff) << 8;
	falcon->data.limit = (reg & 0x3fe00) >> 1;

	/* Record whether debug mode is enabled, if this implementation
	 * exposes a debug register. */
	if (func->debug) {
		u32 val = nvkm_falcon_rd32(falcon, func->debug);
		falcon->debug = (val >> 20) & 0x1;
	}

	return 0;
}
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun void
nvkm_falcon_put(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)174*4882a593Smuzhiyun nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	if (unlikely(!falcon))
177*4882a593Smuzhiyun 		return;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	mutex_lock(&falcon->mutex);
180*4882a593Smuzhiyun 	if (falcon->user == user) {
181*4882a593Smuzhiyun 		nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
182*4882a593Smuzhiyun 		falcon->user = NULL;
183*4882a593Smuzhiyun 	}
184*4882a593Smuzhiyun 	mutex_unlock(&falcon->mutex);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun int
nvkm_falcon_get(struct nvkm_falcon * falcon,const struct nvkm_subdev * user)188*4882a593Smuzhiyun nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	int ret = 0;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	mutex_lock(&falcon->mutex);
193*4882a593Smuzhiyun 	if (falcon->user) {
194*4882a593Smuzhiyun 		nvkm_error(user, "%s falcon already acquired by %s!\n",
195*4882a593Smuzhiyun 			   falcon->name, nvkm_subdev_name[falcon->user->index]);
196*4882a593Smuzhiyun 		mutex_unlock(&falcon->mutex);
197*4882a593Smuzhiyun 		return -EBUSY;
198*4882a593Smuzhiyun 	}
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	nvkm_debug(user, "acquired %s falcon\n", falcon->name);
201*4882a593Smuzhiyun 	if (!falcon->oneinit)
202*4882a593Smuzhiyun 		ret = nvkm_falcon_oneinit(falcon);
203*4882a593Smuzhiyun 	falcon->user = user;
204*4882a593Smuzhiyun 	mutex_unlock(&falcon->mutex);
205*4882a593Smuzhiyun 	return ret;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun 
/* Destructor counterpart of nvkm_falcon_ctor().  Intentionally empty:
 * there is currently no per-falcon state to tear down (the mutexes
 * need no explicit destruction).  Kept so callers have a stable hook. */
void
nvkm_falcon_dtor(struct nvkm_falcon *falcon)
{
}
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun int
nvkm_falcon_ctor(const struct nvkm_falcon_func * func,struct nvkm_subdev * subdev,const char * name,u32 addr,struct nvkm_falcon * falcon)214*4882a593Smuzhiyun nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
215*4882a593Smuzhiyun 		 struct nvkm_subdev *subdev, const char *name, u32 addr,
216*4882a593Smuzhiyun 		 struct nvkm_falcon *falcon)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun 	falcon->func = func;
219*4882a593Smuzhiyun 	falcon->owner = subdev;
220*4882a593Smuzhiyun 	falcon->name = name;
221*4882a593Smuzhiyun 	falcon->addr = addr;
222*4882a593Smuzhiyun 	mutex_init(&falcon->mutex);
223*4882a593Smuzhiyun 	mutex_init(&falcon->dmem_mutex);
224*4882a593Smuzhiyun 	return 0;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun void
nvkm_falcon_del(struct nvkm_falcon ** pfalcon)228*4882a593Smuzhiyun nvkm_falcon_del(struct nvkm_falcon **pfalcon)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	if (*pfalcon) {
231*4882a593Smuzhiyun 		nvkm_falcon_dtor(*pfalcon);
232*4882a593Smuzhiyun 		kfree(*pfalcon);
233*4882a593Smuzhiyun 		*pfalcon = NULL;
234*4882a593Smuzhiyun 	}
235*4882a593Smuzhiyun }
236