xref: /OK3568_Linux_fs/kernel/arch/powerpc/platforms/cell/spufs/run.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #define DEBUG
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include <linux/wait.h>
5*4882a593Smuzhiyun #include <linux/ptrace.h>
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <asm/spu.h>
8*4882a593Smuzhiyun #include <asm/spu_priv1.h>
9*4882a593Smuzhiyun #include <asm/io.h>
10*4882a593Smuzhiyun #include <asm/unistd.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "spufs.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun /* interrupt-level stop callback function. */
spufs_stop_callback(struct spu * spu,int irq)15*4882a593Smuzhiyun void spufs_stop_callback(struct spu *spu, int irq)
16*4882a593Smuzhiyun {
17*4882a593Smuzhiyun 	struct spu_context *ctx = spu->ctx;
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun 	/*
20*4882a593Smuzhiyun 	 * It should be impossible to preempt a context while an exception
21*4882a593Smuzhiyun 	 * is being processed, since the context switch code is specially
22*4882a593Smuzhiyun 	 * coded to deal with interrupts ... But, just in case, sanity check
23*4882a593Smuzhiyun 	 * the context pointer.  It is OK to return doing nothing since
24*4882a593Smuzhiyun 	 * the exception will be regenerated when the context is resumed.
25*4882a593Smuzhiyun 	 */
26*4882a593Smuzhiyun 	if (ctx) {
27*4882a593Smuzhiyun 		/* Copy exception arguments into module specific structure */
28*4882a593Smuzhiyun 		switch(irq) {
29*4882a593Smuzhiyun 		case 0 :
30*4882a593Smuzhiyun 			ctx->csa.class_0_pending = spu->class_0_pending;
31*4882a593Smuzhiyun 			ctx->csa.class_0_dar = spu->class_0_dar;
32*4882a593Smuzhiyun 			break;
33*4882a593Smuzhiyun 		case 1 :
34*4882a593Smuzhiyun 			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
35*4882a593Smuzhiyun 			ctx->csa.class_1_dar = spu->class_1_dar;
36*4882a593Smuzhiyun 			break;
37*4882a593Smuzhiyun 		case 2 :
38*4882a593Smuzhiyun 			break;
39*4882a593Smuzhiyun 		}
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 		/* ensure that the exception status has hit memory before a
42*4882a593Smuzhiyun 		 * thread waiting on the context's stop queue is woken */
43*4882a593Smuzhiyun 		smp_wmb();
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 		wake_up_all(&ctx->stop_wq);
46*4882a593Smuzhiyun 	}
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun 
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	const u32 halt_mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u64 dsisr;

	for (;;) {
		*stat = ctx->ops->status_read(ctx);
		if (!(*stat & halt_mask))
			break;
		/*
		 * A stop reason is flagged, but if the SPU hasn't
		 * finished stopping yet (RUNNING still set) we must
		 * re-read the register to get the final stopped value.
		 */
		if (!(*stat & SPU_STATUS_RUNNING))
			return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	/* pending class 1 (translation) fault counts as stopped */
	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	/* so does any pending class 0 exception */
	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}
81*4882a593Smuzhiyun 
/*
 * spu_setup_isolated - transition an SPU context into isolated mode.
 *
 * Purges the MFC DMA queue, temporarily drops the SPE into privileged
 * (kernel) mode, hands the isolated loader's address to the SPU via the
 * two signal notification registers, starts the loader and waits for it
 * to finish.  Privileged mode is always dropped again on exit.
 *
 * Returns 0 on success or a negative errno:
 *   -ENODEV  no isolated loader was set up,
 *   -EIO     the MFC purge or the loader timed out (~1 HZ each),
 *   -EACCES  the isolated LOAD step failed (SPU no longer running),
 *   -EINVAL  the SPU unexpectedly left isolated state (not allowed by
 *            the CBEA; checked defensively).
 */
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	/* "still loading": running + isolated + load-in-progress bits */
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader: its address is passed through the two 32-bit
	 * signal notification registers (high word, then low word) */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	/* poll until the SPU leaves the "loading" state or we time out */
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
175*4882a593Smuzhiyun 
/*
 * spu_run_init - prepare and start an SPU context for spufs_run_spu().
 * @ctx: context to run; the caller has acquired it (see spufs_run_spu).
 * @npc: next program counter to load (not written in isolated mode).
 *
 * Activates the context (synchronously for NOSCHED contexts), performs
 * isolated-mode setup when requested, programs the privilege-control
 * and NPC registers, then writes the run-control register to set the
 * SPU running and marks the context as running in its sched_flags.
 *
 * Returns 0 on success, or a negative errno propagated from
 * spu_activate() or spu_setup_isolated().
 */
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		/* only (re)load the isolated loader if not already isolated */
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntrl register (eg, to
		 * issue an isolated exit), we need to re-set it here
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		/* single-step mode if the current thread has TIF_SINGLESTEP */
		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		/* already loaded above; account time to userspace now */
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		/* a saved context still needs to be scheduled in */
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}
243*4882a593Smuzhiyun 
/*
 * Tear down after a run: pull the context off the run queue, capture
 * the final SPU status and NPC for the caller, account the idle state,
 * log the exit and release the context.  Reports -ERESTARTSYS when a
 * signal is pending so the caller can restart the system call.
 */
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	spu_del_from_rq(ctx);

	/* snapshot the registers handed back to userspace */
	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	return signal_pending(current) ? -ERESTARTSYS : 0;
}
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun /*
266*4882a593Smuzhiyun  * SPU syscall restarting is tricky because we violate the basic
267*4882a593Smuzhiyun  * assumption that the signal handler is running on the interrupted
268*4882a593Smuzhiyun  * thread. Here instead, the handler runs on PowerPC user space code,
269*4882a593Smuzhiyun  * while the syscall was called from the SPU.
270*4882a593Smuzhiyun  * This means we can only do a very rough approximation of POSIX
271*4882a593Smuzhiyun  * signal semantics.
272*4882a593Smuzhiyun  */
spu_handle_restartsys(struct spu_context * ctx,long * spu_ret,unsigned int * npc)273*4882a593Smuzhiyun static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
274*4882a593Smuzhiyun 			  unsigned int *npc)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun 	int ret;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	switch (*spu_ret) {
279*4882a593Smuzhiyun 	case -ERESTARTSYS:
280*4882a593Smuzhiyun 	case -ERESTARTNOINTR:
281*4882a593Smuzhiyun 		/*
282*4882a593Smuzhiyun 		 * Enter the regular syscall restarting for
283*4882a593Smuzhiyun 		 * sys_spu_run, then restart the SPU syscall
284*4882a593Smuzhiyun 		 * callback.
285*4882a593Smuzhiyun 		 */
286*4882a593Smuzhiyun 		*npc -= 8;
287*4882a593Smuzhiyun 		ret = -ERESTARTSYS;
288*4882a593Smuzhiyun 		break;
289*4882a593Smuzhiyun 	case -ERESTARTNOHAND:
290*4882a593Smuzhiyun 	case -ERESTART_RESTARTBLOCK:
291*4882a593Smuzhiyun 		/*
292*4882a593Smuzhiyun 		 * Restart block is too hard for now, just return -EINTR
293*4882a593Smuzhiyun 		 * to the SPU.
294*4882a593Smuzhiyun 		 * ERESTARTNOHAND comes from sys_pause, we also return
295*4882a593Smuzhiyun 		 * -EINTR from there.
296*4882a593Smuzhiyun 		 * Assume that we need to be restarted ourselves though.
297*4882a593Smuzhiyun 		 */
298*4882a593Smuzhiyun 		*spu_ret = -EINTR;
299*4882a593Smuzhiyun 		ret = -ERESTARTSYS;
300*4882a593Smuzhiyun 		break;
301*4882a593Smuzhiyun 	default:
302*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: unexpected return code %ld\n",
303*4882a593Smuzhiyun 			__func__, *spu_ret);
304*4882a593Smuzhiyun 		ret = 0;
305*4882a593Smuzhiyun 	}
306*4882a593Smuzhiyun 	return ret;
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun 
/*
 * spu_process_callback - service a PPC syscall requested by SPU code.
 *
 * Called when the SPU stopped with the syscall-callback stop code (see
 * spufs_run_spu).  Reads an indirect pointer from local store at the
 * current NPC to a struct spu_syscall_block, runs the requested system
 * call on the SPU's behalf via spu_sys_callback(), writes the result
 * back into local store, advances the NPC past the pointer word and
 * restarts the SPU.
 *
 * The context is released while the syscall executes so the SPU isn't
 * pinned; the local-store mapping is re-fetched afterwards since the
 * context may have moved in the meantime.
 *
 * Returns 0 on success, -EFAULT if the syscall-block pointer would run
 * past the end of local store, or -ERESTARTSYS if the syscall must be
 * restarted (the state mutex has been re-taken in that case too).
 */
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;	/* word-align the NPC */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;	/* block would overrun local store */
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;	/* resume after the word holding the pointer */

	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
352*4882a593Smuzhiyun 
/*
 * spufs_run_spu - run an SPU context until it stops.
 * @ctx:   context to run.
 * @npc:   in/out next program counter.
 * @event: out; the context's event_return word.
 *
 * Starts the SPU via spu_run_init(), then loops: sleep until the SPU
 * stops, service syscall callbacks (stop code 0x2104) and class 0/1
 * exceptions, and keep going until the SPU stops for a reason the
 * caller must see (stop-and-signal, halt, single step) or an error or
 * signal interrupts us.
 *
 * Returns the final SPU status word for normal stops, or a negative
 * errno (notably -ERESTARTSYS when interrupted by a signal).
 */
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	/* serialize concurrent spu_run calls on the same context */
	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		/* deferred scheduler notification, unless the SPU actually
		 * stopped-and-signalled (then fall through and handle it) */
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		/* stop code 0x2104: SPU-side syscall callback */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				       SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	/* 0x21xx stop codes count as library-assisted calls */
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	/* report the status word instead of 0/-ERESTARTSYS for stops the
	 * caller must see (halt, single step, non-callback stop codes) */
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

	/* a 0x3fff stop code is delivered to the thread as SIGTRAP */
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}
456