xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

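/*
 * nvkm_falcon_v1_load_imem - upload ucode into the falcon's instruction
 * memory through the indirect IMEM port registers at 0x180/0x184/0x188
 * (commonly referred to as IMEMC/IMEMD/IMEMT in falcon documentation).
 * Bit 24 of the control word enables auto-increment, and @secure sets
 * bit 28 to mark the uploaded pages as secure. Because IMEM is tagged
 * per 256-byte page, a new tag is written every 0x40 words and the
 * upload is zero-padded to a full page.
 *
 * Hypothetical usage sketch (caller and blob names are illustrative
 * only): upload a secure ucode image to IMEM offset 0 on port 0:
 *
 *	nvkm_falcon_v1_load_imem(falcon, blob.data, 0x0000, blob.size,
 *				 0x0000, 0, true);
 */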
void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u16 tag, u8 port, bool secure)
{
	u8 rem = size % 4;
	u32 reg;
	int i;

	size -= rem;

	reg = start | BIT(24) | (secure ? BIT(28) : 0);
	nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
	for (i = 0; i < size / 4; i++) {
		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
	}

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		/* write new tag every 256B */
		if ((i & 0x3f) == 0)
			nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
				 extra & (BIT(rem * 8) - 1));
		++i;
	}

	/* code must be padded to 0x40 words */
	for (; i & 0x3f; i++)
		nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

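/*
 * nvkm_falcon_v1_load_emem - upload data into EMEM, the extra data
 * memory present on some falcons (e.g. SEC2), through its own port
 * registers at 0xac0/0xac4. Callers reach EMEM via load_dmem() with an
 * offset at or above func->emem_addr; by the time we get here, @start
 * has already been rebased to an EMEM-relative offset.
 */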
static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

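/*
 * nvkm_falcon_v1_load_dmem - upload data into the falcon's data memory
 * through the indirect DMEM port registers at 0x1c0/0x1c4 (commonly
 * called DMEMC/DMEMD), with bit 24 enabling auto-increment on writes.
 * Writes whose @start lies at or above func->emem_addr are routed to
 * EMEM instead.
 */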
void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
			 u32 size, u8 port)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;
	int i;

	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_load_emem(falcon, data,
						start - func->emem_addr, size,
						port);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
	for (i = 0; i < size / 4; i++)
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

	/*
	 * If size is not a multiple of 4, mask the last word to ensure garbage
	 * does not get written
	 */
	if (rem) {
		u32 extra = ((u32 *)data)[i];

		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
				 extra & (BIT(rem * 8) - 1));
	}
}

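/*
 * nvkm_falcon_v1_read_emem - read back from EMEM through the same port
 * registers used for EMEM writes, but with bit 25 (rather than bit 24)
 * set in the control word, which selects auto-increment on reads.
 */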
static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	u8 rem = size % 4;
	int i;

	size -= rem;

	nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

	/*
	 * If size is not a multiple of 4, copy only the valid bytes of the
	 * last word so that garbage does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

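/*
 * nvkm_falcon_v1_read_dmem - read back from the falcon's data memory
 * through the DMEM port registers, again with bit 25 selecting
 * auto-increment on reads. Reads starting at or above func->emem_addr
 * are routed to EMEM.
 *
 * Hypothetical usage sketch (buffer name is illustrative only): read
 * 16 bytes from the start of DMEM on port 0:
 *
 *	u32 msg[4];
 *	nvkm_falcon_v1_read_dmem(falcon, 0x0000, sizeof(msg), 0, msg);
 */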
void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
			 u8 port, void *data)
{
	const struct nvkm_falcon_func *func = falcon->func;
	u8 rem = size % 4;
	int i;

	if (func->emem_addr && start >= func->emem_addr)
		return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr,
						size, port, data);

	size -= rem;

	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
	for (i = 0; i < size / 4; i++)
		((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

	/*
	 * If size is not a multiple of 4, copy only the valid bytes of the
	 * last word so that garbage does not get read
	 */
	if (rem) {
		u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		for (i = size; i < size + rem; i++) {
			((u8 *)data)[i] = (u8)(extra & 0xff);
			extra >>= 8;
		}
	}
}

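/*
 * nvkm_falcon_v1_bind_context - point the falcon's DMA engine at the
 * instance block backing @ctx, or disable instance binding altogether
 * when @ctx is NULL. The FBIF aperture registers are programmed so the
 * ucode's DMA indices resolve to virtual, VRAM, and coherent and
 * non-coherent sysmem targets; inst_loc encodes where the instance
 * block itself lives (the encoding is inferred from the memory targets
 * this code handles).
 */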
void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
	const u32 fbif = falcon->func->fbif;
	u32 inst_loc;

	/* disable instance block binding */
	if (ctx == NULL) {
		nvkm_falcon_wr32(falcon, 0x10c, 0x0);
		return;
	}

	nvkm_falcon_wr32(falcon, 0x10c, 0x1);

	/* setup apertures - virtual */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
	/* setup apertures - physical */
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
	nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

	/* Set context */
	switch (nvkm_memory_target(ctx)) {
	case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
	case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
	case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
	default:
		WARN_ON(1);
		return;
	}

	/* Enable context */
	nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
	nvkm_falcon_wr32(falcon, 0x054,
			 ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
			 (inst_loc << 28) | (1 << 30));

	nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}

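/* Set the boot vector the falcon will begin executing from. */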
void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
	nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

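/*
 * nvkm_falcon_v1_start - kick the falcon CPU. If bit 6 of the CPU
 * control register at 0x100 reads back set, the start request must go
 * through the alias register at 0x130 instead; either way, writing 0x2
 * sets the start bit (register naming per common falcon conventions).
 */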
void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
	u32 reg = nvkm_falcon_rd32(falcon, 0x100);

	if (reg & BIT(6))
		nvkm_falcon_wr32(falcon, 0x130, 0x2);
	else
		nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

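/*
 * nvkm_falcon_v1_wait_for_halt - poll the halted bit (0x10) of the CPU
 * control register for up to @ms milliseconds. Returns a negative
 * error code on timeout, 0 once the falcon has halted.
 */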
int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
	if (ret < 0)
		return ret;

	return 0;
}

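/*
 * nvkm_falcon_v1_clear_interrupt - clear the interrupt bits in @mask
 * via the interrupt-clear register at 0x004, then poll the interrupt
 * status register at 0x008 for up to 10ms until they read back as 0.
 */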
int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	/* clear interrupt(s) */
	nvkm_falcon_mask(falcon, 0x004, mask, mask);
	/* wait until interrupts are cleared */
	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

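/*
 * falcon_v1_wait_idle - poll the idle-state register at 0x04c until
 * all 16 unit-busy bits clear, waiting at most 10ms.
 */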
static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
	if (ret < 0)
		return ret;

	return 0;
}

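/*
 * nvkm_falcon_v1_enable - bring the falcon to a usable state: wait for
 * the memory-scrubbing bits (0x6) in register 0x10c to clear, wait for
 * the core to go idle, then unmask all IRQs through the interrupt
 * mask-set register at 0x010.
 */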
int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
	if (ret < 0) {
		nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
		return ret;
	}

	ret = falcon_v1_wait_idle(falcon);
	if (ret)
		return ret;

	/* enable IRQs */
	nvkm_falcon_wr32(falcon, 0x010, 0xff);

	return 0;
}

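/*
 * nvkm_falcon_v1_disable - mask all IRQs through the interrupt
 * mask-clear register at 0x014 and wait for in-flight work to finish.
 */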
void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
	/* disable IRQs and wait for any previous code to complete */
	nvkm_falcon_wr32(falcon, 0x014, 0xff);
	falcon_v1_wait_idle(falcon);
}

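/* Hardware ops shared by all v1-class falcon engines. */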
static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
	.load_imem = nvkm_falcon_v1_load_imem,
	.load_dmem = nvkm_falcon_v1_load_dmem,
	.read_dmem = nvkm_falcon_v1_read_dmem,
	.bind_context = nvkm_falcon_v1_bind_context,
	.start = nvkm_falcon_v1_start,
	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
	.enable = nvkm_falcon_v1_enable,
	.disable = nvkm_falcon_v1_disable,
	.set_start_addr = nvkm_falcon_v1_set_start_addr,
};

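/*
 * nvkm_falcon_v1_new - allocate and construct a falcon instance using
 * the v1 ops table above.
 *
 * Hypothetical usage sketch (the subdev, name, and base address below
 * are illustrative only):
 *
 *	struct nvkm_falcon *falcon;
 *	int ret = nvkm_falcon_v1_new(subdev, "sec2", 0x087000, &falcon);
 *	if (ret)
 *		return ret;
 */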
int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
		   struct nvkm_falcon **pfalcon)
{
	struct nvkm_falcon *falcon;
	if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
	return 0;
}