// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return and do nothing, since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module-specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

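/*
 * Check whether the context has stopped, for use as the wait condition
 * in spufs_run_spu().  Returns non-zero when the SPU status register
 * shows a stop condition (and the SPU has actually finished stopping),
 * or when a scheduler notification, a class 1 DSISR fault, or a pending
 * class 0 exception needs attention from the controlling thread.
 */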
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

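/*
 * Load the isolated-mode loader into the SPE and start it.  The MFC DMA
 * queue is purged first, the SPE is temporarily put into kernel mode so
 * the loader's address can be handed in through the two signal
 * notification registers, and the run control register is then written
 * to kick off the isolated load.
 */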
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/*
	 * Start the loader: pass the 64-bit address of the isolated loader
	 * to the SPE through the signal notification registers, high word
	 * in signal 1 and low word in signal 2.
	 */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If the isolated LOAD failed, run the SPU anyway; we will
		 * get a stop-and-signal event later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

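/*
 * Prepare a context for running: make sure it is loaded where required,
 * program the privilege control and next-PC registers, and finally write
 * the run control register to set the SPU running.
 */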
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g., to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

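/*
 * Tear down after a run: read back the status and next-PC registers,
 * update accounting and the switch log, and drop the context lock.
 * Returns -ERESTARTSYS if a signal is pending, so that sys_spu_run
 * can be restarted.
 */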
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread.  Here, instead, the handler runs in PowerPC user space,
 * while the system call was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
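		/*
		 * Rewind past the 32-bit local-store pointer word and the
		 * stop-and-signal instruction itself, so the syscall
		 * callback is reissued when the SPU resumes.
		 */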
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause(); we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

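/*
 * Handle a PPE-assisted library call: the SPU program stops with a
 * stop-and-signal code of 0x2104, leaving a 32-bit local-store pointer
 * to a struct spu_syscall_block right after the stop instruction.  The
 * system call is performed on the SPU's behalf and the result written
 * back to local store before the SPU is allowed to continue.
 */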
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

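	/* nr_ret carries the system call number on entry and receives the
	 * return value on exit; range-check it before dispatching. */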
	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

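/*
 * Back end of the spu_run system call: start the SPU and wait in a loop
 * until it stops for a reason the controlling thread must handle
 * (stop-and-signal, halt, single step, or a pending signal), servicing
 * syscall callbacks and class 0/1 events along the way.
 */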
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP))
				continue;
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

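		/*
		 * A stop-and-signal with code 0x2104 is the SPE-side
		 * library's request for a PPE-assisted system call.
		 */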
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				       SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

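	/*
	 * Stop codes in the 0x21xx range are library-assisted calls
	 * (including the 0x2104 syscall callback handled above); count
	 * them for the libassist statistic.
	 */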
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

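	/*
	 * A stop code of 0x3fff is this file's convention for a
	 * breakpoint; deliver SIGTRAP so an attached debugger can
	 * pick it up.
	 */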
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}
