1/*
2 * Authors:    Hans-Peter Nilsson (hp@axis.com)
3 *
4 */
5#ifndef _CRIS_ARCH_UACCESS_H
6#define _CRIS_ARCH_UACCESS_H
7
8/*
9 * We don't tell gcc that we are accessing memory, but this is OK
10 * because we do not write to any memory gcc knows about, so there
11 * are no aliasing issues.
12 *
13 * Note that PC at a fault is the address *at* the faulting
14 * instruction for CRISv32.
15 */
/*
 * __put_user_asm: store x to the user address addr with the single
 * store instruction `op` (move.b/move.w/move.d, chosen by the caller).
 * On a bus fault the fixup at 3: loads -EFAULT into err and jumps back
 * past the store; on success err passes through unchanged ("0" (err)
 * ties the input to the output register).  The __ex_table entry maps
 * the faulting store at 2: to the fixup at 3:.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"2:	"op" %1,[%2]\n"				\
		"4:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"3:	move.d %3,%0\n"				\
		"	jump 4b\n"				\
		"	nop\n"					\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.dword 2b,3b\n"				\
		"	.previous\n"				\
		: "=r" (err)					\
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
30
/*
 * 64-bit put_user: store the low word (%M2) with post-increment, then
 * the high word (%H2) at the incremented address.  Either store may
 * fault; both __ex_table entries share the fixup at 3:, which sets err
 * to -EFAULT.  The "=b" dummy output tells gcc the address register is
 * clobbered by the post-increment.
 */
#define __put_user_asm_64(x, addr, err) do {			\
	int dummy_for_put_user_asm_64_;				\
	__asm__ __volatile__(					\
		"2:	move.d %M2,[%1+]\n"			\
		"4:	move.d %H2,[%1]\n"			\
		"5:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"3:	move.d %4,%0\n"				\
		"	jump 5b\n"				\
		"	.previous\n"				\
		"	.section __ex_table,\"a\"\n"		\
		"	.dword 2b,3b\n"				\
		"	.dword 4b,3b\n"				\
		"	.previous\n"				\
		: "=r" (err), "=b" (dummy_for_put_user_asm_64_)	\
		: "r" (x), "1" (addr), "g" (-EFAULT),		\
		  "0" (err));					\
	} while (0)
49
50/* See comment before __put_user_asm.  */
51
/*
 * __get_user_asm: load x from the user address addr with the single
 * load instruction `op`.  On a fault the fixup at 3: sets err to
 * -EFAULT and, in the delay slot of the jump back, zeroes x so no
 * stale register contents leak to the caller.
 */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"2:	"op" [%2],%1\n"			\
		"4:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %3,%0\n"			\
		"	jump 4b\n"			\
		"	moveq 0,%1\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "g" (-EFAULT), "0" (err))
66
/*
 * 64-bit get_user: load the low word into %M1 (post-incrementing the
 * address), then the high word into %H1.  Both loads map to the fixup
 * at 3:, which sets err to -EFAULT and does "moveq 0,%1" in the jump
 * delay slot.  NOTE(review): for a 64-bit x that moveq clears only the
 * register gcc picks for %1, not necessarily both halves -- this
 * matches the established pattern here; confirm against callers.
 * The "=b" dummy output covers the post-increment clobber of addr.
 */
#define __get_user_asm_64(x, addr, err) do {		\
	int dummy_for_get_user_asm_64_;			\
	__asm__ __volatile__(				\
		"2:	move.d [%2+],%M1\n"		\
		"4:	move.d [%2],%H1\n"		\
		"5:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %4,%0\n"			\
		"	jump 5b\n"			\
		"	moveq 0,%1\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.dword 4b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=r" (x),			\
		  "=b" (dummy_for_get_user_asm_64_)	\
		: "2" (addr), "g" (-EFAULT), "0" (err));\
	} while (0)
86
87/*
88 * Copy a null terminated string from userspace.
89 *
90 * Must return:
91 * -EFAULT		for an exception
92 * count		if we hit the buffer limit
93 * bytes copied		if we hit a null byte
94 * (without the null byte)
95 */
static inline long
__do_strncpy_from_user(char *dst, const char *src, long count)
{
	long res;

	/* Nothing to copy; contract above says return 0 here.  */
	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	/* $acr carries each byte; %0 counts down from count and is turned
	   back into "bytes copied" (count - remaining, negated twice) at
	   the end.  Only the user-side loads at 5:/2: can fault.  */
	__asm__ __volatile__ (
		"	move.d %3,%0\n"
		"5:	move.b [%2+],$acr\n"
		"1:	beq 2f\n"
		"	move.b $acr,[%1+]\n"

		"	subq 1,%0\n"
		"2:	bne 1b\n"
		"	move.b [%2+],$acr\n"

		"	sub.d %3,%0\n"
		"	neg.d %0,%0\n"
		"3:\n"
		/* Fault path: report -EFAULT (%7) instead of a count.  */
		"	.section .fixup,\"ax\"\n"
		"4:	move.d %7,%0\n"
		"	jump 3b\n"
		"	nop\n"

		/* The address for a fault at the first move is trivial.
		   The address for a fault at the second move is that of
		   the preceding branch insn, since the move insn is in
		   its delay-slot.  That address is also a branch
		   target.  Just so you don't get confused...  */
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.dword 5b,4b\n"
		"	.dword 2b,4b\n"
		"	.previous"
		: "=r" (res), "=b" (dst), "=b" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
		: "acr");

	return res;
}
156
157/* A few copy asms to build up the more complex ones from.
158
159   Note again, a post-increment is performed regardless of whether a bus
160   fault occurred in that instruction, and PC for a faulted insn is the
161   address for the insn, or for the preceding branch when in a delay-slot.  */
162
/*
 * Frame for the fixed-size copy asms below: COPY is emitted before the
 * common exit label 1:, FIXUP goes into .fixup and TENTRY into
 * __ex_table.  %0 is the destination, %1 the source and %2 the count
 * of bytes NOT copied (incremented by the fixups on a fault); all
 * three are tied input/outputs.  $acr is the copy scratch register.
 */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm__ __volatile__ (				\
			COPY				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous\n"			\
		: "=b" (to), "=b" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "acr", "memory")
176
/*
 * Fixed-size copy-from-user building blocks.  Each __asm_copy_from_user_N
 * is built by nesting the *_Nx_cont macros: the caller's COPY/FIXUP/TENTRY
 * text is placed before the inner macro's own, so label pairs grow
 * (2/3, 4/5, 6/7, ...).  Only the user-side loads can fault; on a
 * fault the fixup adds the size of the faulting access to %2 (bytes
 * not copied) and clears the corresponding destination bytes, then
 * either jumps to the exit (innermost pair) or falls through into the
 * following fixups so the whole remaining destination gets cleared.
 */
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret,	\
		"2:	move.b [%1+],$acr\n"	\
		"	move.b $acr,[%0+]\n",	\
		"3:	addq 1,%2\n"		\
		"	jump 1b\n"		\
		"	clear.b [%0+]\n",	\
		"	.dword 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
			COPY				\
		"2:	move.w [%1+],$acr\n"		\
		"	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"3:	addq 2,%2\n"			\
		"	jump 1b\n"			\
		"	clear.w [%0+]\n",		\
			TENTRY				\
		"	.dword 2b,3b\n")

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)		\
	__asm_copy_from_user_2x_cont(to, from, ret,	\
		"4:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"5:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
			COPY				\
		"2:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"3:	addq 4,%2\n"			\
		"	jump 1b\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 2b,3b\n")

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret,	\
		"4:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"5:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret,	\
			COPY				\
		"4:	move.w [%1+],$acr\n"		\
		"	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"5:	addq 2,%2\n"			\
		"	clear.w [%0+]\n",		\
			TENTRY				\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_6(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret,	\
		"6:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"7:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret,	\
			COPY				\
		"4:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"5:	addq 4,%2\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_8(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret,	\
		"6:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"7:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret,	\
			COPY				\
		"6:	move.w [%1+],$acr\n"		\
		"	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"7:	addq 2,%2\n"			\
		"	clear.w [%0+]\n",		\
			TENTRY				\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_10(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret)		\
	__asm_copy_from_user_10x_cont(to, from, ret,	\
		"8:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"9:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret,	\
			COPY				\
		"6:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"7:	addq 4,%2\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_12(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret,	\
		"8:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"9:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret,	\
			COPY				\
		"8:	move.w [%1+],$acr\n"		\
		"	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"9:	addq 2,%2\n"			\
		"	clear.w [%0+]\n",		\
			TENTRY				\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_14(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret,	\
		"10:	move.b [%1+],$acr\n"		\
		"	move.b $acr,[%0+]\n",		\
		"11:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 10b,11b\n")

#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret,	\
			COPY				\
		"8:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"9:	addq 4,%2\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_16(to, from, ret) \
	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_16x_cont(to, from, ret,	\
			COPY				\
		"10:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"11:	addq 4,%2\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 10b,11b\n")

#define __asm_copy_from_user_20(to, from, ret) \
	__asm_copy_from_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_20x_cont(to, from, ret,	\
			COPY				\
		"12:	move.d [%1+],$acr\n"		\
		"	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"13:	addq 4,%2\n"			\
		"	clear.d [%0+]\n",		\
			TENTRY				\
		"	.dword 12b,13b\n")

#define __asm_copy_from_user_24(to, from, ret) \
	__asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
383
384/* And now, the to-user ones.  */
385
/*
 * The to-user counterparts mirror the from-user macros above, but here
 * only the stores (labels 2:, 4:, ...) can fault, and the fixups just
 * add the access size to the remainder count %2 -- there is no
 * destination to clear.  The innermost fixup jumps back to the exit
 * label 1: with the count update in the jump's delay slot.
 */
#define __asm_copy_to_user_1(to, from, ret)	\
	__asm_copy_user_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"	\
		"2:	move.b $acr,[%0+]\n",	\
		"3:	jump 1b\n"		\
		"	addq 1,%2\n",		\
		"	.dword 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
			COPY				\
		"	move.w [%1+],$acr\n"		\
		"2:	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"3:	jump 1b\n"			\
		"	addq 2,%2\n",			\
			TENTRY				\
		"	.dword 2b,3b\n")

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"4:	move.b $acr,[%0+]\n",		\
		"5:	addq 1,%2\n",			\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"2:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"3:	jump 1b\n"			\
		"	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 2b,3b\n")

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"4:	move.b $acr,[%0+]\n",		\
		"5:	addq 1,%2\n",			\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
			COPY				\
		"	move.w [%1+],$acr\n"		\
		"4:	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"5:	addq 2,%2\n",			\
			TENTRY				\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"6:	move.b $acr,[%0+]\n",		\
		"7:	addq 1,%2\n",			\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"4:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"5:	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"6:	move.b $acr,[%0+]\n",		\
		"7:	addq 1,%2\n",			\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
			COPY				\
		"	move.w [%1+],$acr\n"		\
		"6:	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"7:	addq 2,%2\n",			\
			TENTRY				\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"8:	move.b $acr,[%0+]\n",		\
		"9:	addq 1,%2\n",			\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"6:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"7:	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"8:	move.b $acr,[%0+]\n",		\
		"9:	addq 1,%2\n",			\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
			COPY				\
		"	move.w [%1+],$acr\n"		\
		"8:	move.w $acr,[%0+]\n",		\
			FIXUP				\
		"9:	addq 2,%2\n",			\
			TENTRY				\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_14(to, from, ret)	\
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret,	\
		"	move.b [%1+],$acr\n"		\
		"10:	move.b $acr,[%0+]\n",		\
		"11:	addq 1,%2\n",			\
		"	.dword 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"8:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"9:	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_16x_cont(to, from, ret,	\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"10:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"11:	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 10b,11b\n")

#define __asm_copy_to_user_20(to, from, ret) \
	__asm_copy_to_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY)	\
	__asm_copy_to_user_20x_cont(to, from, ret,	\
			COPY				\
		"	move.d [%1+],$acr\n"		\
		"12:	move.d $acr,[%0+]\n",		\
			FIXUP				\
		"13:	addq 4,%2\n",			\
			TENTRY				\
		"	.dword 12b,13b\n")

#define __asm_copy_to_user_24(to, from, ret)	\
	__asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
574
575/* Define a few clearing asms with exception handlers.  */
576
577/* This frame-asm is like the __asm_copy_user_cont one, but has one less
578   input.  */
579
/*
 * __asm_clear: frame for the clearing asms; like __asm_copy_user_cont
 * but with no source operand.  %0 is the destination pointer and %1
 * the count of bytes left uncleared on a fault.  No "acr" clobber:
 * clear.[bwd] writes the destination directly.
 */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm__ __volatile__ (				\
			CLEAR				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous"			\
		: "=b" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "memory")
593
/*
 * Fixed-size clear_user building blocks, nested the same way as the
 * copy macros: each *x_cont prepends its CLEAR/FIXUP/TENTRY text, and
 * on a fault the fixup adds the size of the faulting clear to %1 and
 * falls through (or jumps back to 1: from the innermost pair) so the
 * remainder count covers everything not cleared.
 */
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret,			\
		"2:	clear.b [%0+]\n",	\
		"3:	jump 1b\n"		\
		"	addq 1,%1\n",		\
		"	.dword 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret,			\
		"2:	clear.w [%0+]\n",	\
		"3:	jump 1b\n"		\
		"	addq 2,%1\n",		\
		"	.dword 2b,3b\n")

#define __asm_clear_3(to, ret) \
     __asm_clear(to, ret,			\
		 "2:	clear.w [%0+]\n"	\
		 "3:	clear.b [%0+]\n",	\
		 "4:	addq 2,%1\n"		\
		 "5:	jump 1b\n"		\
		 "	addq 1,%1\n",		\
		 "	.dword 2b,4b\n"		\
		 "	.dword 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret,				\
			CLEAR				\
		"2:	clear.d [%0+]\n",		\
			FIXUP				\
		"3:	jump 1b\n"			\
		"	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 2b,3b\n")

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret,			\
			CLEAR				\
		"4:	clear.d [%0+]\n",		\
			FIXUP				\
		"5:	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 4b,5b\n")

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret,			\
			CLEAR				\
		"6:	clear.d [%0+]\n",		\
			FIXUP				\
		"7:	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 6b,7b\n")

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret,			\
			CLEAR				\
		"8:	clear.d [%0+]\n",		\
			FIXUP				\
		"9:	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 8b,9b\n")

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_16x_cont(to, ret,			\
			CLEAR				\
		"10:	clear.d [%0+]\n",		\
			FIXUP				\
		"11:	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 10b,11b\n")

#define __asm_clear_20(to, ret) \
	__asm_clear_20x_cont(to, ret, "", "", "")

#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_20x_cont(to, ret,			\
			CLEAR				\
		"12:	clear.d [%0+]\n",		\
			FIXUP				\
		"13:	addq 4,%1\n",			\
			TENTRY				\
		"	.dword 12b,13b\n")

#define __asm_clear_24(to, ret) \
	__asm_clear_24x_cont(to, ret, "", "", "")
690
691/*
692 * Return the size of a string (including the ending 0)
693 *
694 * Return length of string in userspace including terminating 0
695 * or 0 for error.  Return a value greater than N if too long.
696 */
697
static inline long
strnlen_user(const char *s, long n)
{
	long res, tmp1;

	/* Only the start address is range-checked here (size 0); faults
	   while scanning are caught via the exception table below.  */
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;

	/*
	 * This code is deduced from:
	 *
	 *	tmp1 = n;
	 *	while (tmp1-- > 0 && *s++)
	 *	  ;
	 *
	 *	res = n - tmp1;
	 *
	 *  (with tweaks).
	 */

	/* $acr counts down from n while test.b scans for the NUL; at 1:
	   the result is n minus the remaining count.  A fault at the
	   user-side load (4:) lands in the fixup (3:), which clears %0
	   in the jump delay slot so 0 is returned for an exception.  */
	__asm__ __volatile__ (
		"	move.d %1,$acr\n"
		"	cmpq 0,$acr\n"
		"0:\n"
		"	ble 1f\n"
		"	subq 1,$acr\n"

		"4:	test.b [%0+]\n"
		"	bne 0b\n"
		"	cmpq 0,$acr\n"
		"1:\n"
		"	move.d %1,%0\n"
		"	sub.d $acr,%0\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"

		"3:	jump 2b\n"
		"	clear.d %0\n"

		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.dword 4b,3b\n"
		"	.previous\n"
		: "=r" (res), "=r" (tmp1)
		: "0" (s), "1" (n)
		: "acr");

	return res;
}
747
748#endif
749