/* checksum.S: Sparc optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *	Linux/Alpha checksum c-code
 *      Linux/ix86 inline checksum assembly
 *      RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *	David Mosberger-Tang for optimized reference c-code
 *	BSD4.4 portable checksum routine
 */

#include <asm/errno.h>

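/* For orientation, a rough C model of the running sum these routines
 * maintain, assuming the usual 32-bit accumulation with end-around carry
 * (RFC 1071).  The name csum_partial_model is ours and the sketch is
 * illustrative only; it is not part of the build:
 *
 *	unsigned int csum_partial_model(const unsigned char *buf, int len,
 *					unsigned int sum)
 *	{
 *		while (len >= 4) {
 *			unsigned int w = *(const unsigned int *) buf;
 *			sum += w;
 *			if (sum < w)
 *				sum++;
 *			buf += 4;
 *			len -= 4;
 *		}
 *		return sum;
 *	}
 *
 * The "sum++" is the end-around carry that addxcc/addx implement below;
 * the trailing halfword/byte cruft is handled separately at the end.
 */
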
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5)	\
	ldd	[buf + offset + 0x00], t0;			\
	ldd	[buf + offset + 0x08], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	ldd	[buf + offset + 0x10], t4;			\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;					\
	ldd	[buf + offset + 0x18], t0;			\
	addxcc	t4, sum, sum;					\
	addxcc	t5, sum, sum;					\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;
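
/* One CSUM_BIGCHUNK consumes 0x20 bytes: each ldd fills a register pair,
 * and each addxcc both adds the carry from the previous add and latches a
 * new one, so the carries ripple through the whole chain.  The caller must
 * mop up the final carry with an addx after the last invocation.
 */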

#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3)	\
	ldd	[buf - offset - 0x08], t0;			\
	ldd	[buf - offset - 0x00], t2;			\
	addxcc	t0, sum, sum;					\
	addxcc	t1, sum, sum;					\
	addxcc	t2, sum, sum;					\
	addxcc	t3, sum, sum;
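
/* CSUM_LASTCHUNK sums 0x10 bytes.  The offsets are negative because the
 * buffer pointer has already been advanced past the region; consecutive
 * invocations below form a fall-through jump table indexed by length.
 * Reading note for the code that follows: the instruction on the line
 * after a branch, indented by one extra space, executes in that branch's
 * delay slot.
 */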

	/* Do end cruft out of band to get better cache patterns. */
csum_partial_end_cruft:
	be	1f				! caller asks %o1 & 0x8
	 andcc	%o1, 4, %g0			! nope, check for word remaining
	ldd	[%o0], %g2			! load two
	addcc	%g2, %o2, %o2			! add first word to sum
	addxcc	%g3, %o2, %o2			! add second word as well
	add	%o0, 8, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 4, %g0			! check again for word remaining
1:	be	1f				! nope, skip this code
	 andcc	%o1, 3, %o1			! check for trailing bytes
	ld	[%o0], %g2			! load it
	addcc	%g2, %o2, %o2			! add to sum
	add	%o0, 4, %o0			! advance buf ptr
	addx	%g0, %o2, %o2			! add in final carry
	andcc	%o1, 3, %g0			! check again for trailing bytes
1:	be	1f				! no trailing bytes, return
	 addcc	%o1, -1, %g0			! only one byte remains?
	bne	2f				! at least two bytes more
	 subcc	%o1, 2, %o1			! only two bytes more?
	b	4f				! only one byte remains
	 or	%g0, %g0, %o4			! clear fake hword value
2:	lduh	[%o0], %o4			! get hword
	be	6f				! jmp if only hword remains
	 add	%o0, 2, %o0			! advance buf ptr either way
	sll	%o4, 16, %o4			! create upper hword
4:	ldub	[%o0], %o5			! get final byte
	sll	%o5, 8, %o5			! put into place
	or	%o5, %o4, %o4			! coalesce with hword (if any)
6:	addcc	%o4, %o2, %o2			! add to sum
1:	retl					! get outta here
	 addx	%g0, %o2, %o0			! add final carry into retval

	/* Also do alignment out of band to get better cache patterns. */
csum_partial_fix_alignment:
	cmp	%o1, 6
	bl	cpte - 0x4
	 andcc	%o0, 0x2, %g0
	be	1f
	 andcc	%o0, 0x4, %g0
	lduh	[%o0 + 0x00], %g2
	sub	%o1, 2, %o1
	add	%o0, 2, %o0
	sll	%g2, 16, %g2
	addcc	%g2, %o2, %o2
	srl	%o2, 16, %g3
	addx	%g0, %g3, %g2
	sll	%o2, 16, %o2
	sll	%g2, 16, %g3
	srl	%o2, 16, %o2
	andcc	%o0, 0x4, %g0
	or	%g3, %o2, %o2
1:	be	cpa
	 andcc	%o1, 0xffffff80, %o3
	ld	[%o0 + 0x00], %g2
	sub	%o1, 4, %o1
	addcc	%g2, %o2, %o2
	add	%o0, 4, %o0
	addx	%g0, %o2, %o2
	b	cpa
	 andcc	%o1, 0xffffff80, %o3

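/* Note on the halfword case above: the odd leading halfword is added in
 * the high half of the accumulator and the carry-out is folded straight
 * back into the running sum by the sll/srl sequence, so nothing is lost
 * before the word-aligned main loop takes over.
 */
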
	/* The common case is to get called with a nicely aligned
	 * buffer of size 0x20.  Follow the code path for that case.
	 */
	.globl	csum_partial
csum_partial:			/* %o0=buf, %o1=len, %o2=sum */
	andcc	%o0, 0x7, %g0				! alignment problems?
	bne	csum_partial_fix_alignment		! yep, handle it
	 sethi	%hi(cpte - 8), %g7			! prepare table jmp ptr
	andcc	%o1, 0xffffff80, %o3			! num loop iterations
cpa:	be	3f					! none to do
	 andcc	%o1, 0x70, %g1				! clears carry flag too
5:	CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2				! sink in final carry
	subcc	%o3, 128, %o3				! detract from loop iters
	bne	5b					! more to do
	 add	%o0, 128, %o0				! advance buf ptr
	andcc	%o1, 0x70, %g1				! clears carry flag too
3:	be	cpte					! nope
	 andcc	%o1, 0xf, %g0				! anything left at all?
	srl	%g1, 1, %o4				! compute offset
	sub	%g7, %g1, %g7				! adjust jmp ptr
	sub	%g7, %o4, %g7				! final jmp ptr adjust
	jmp	%g7 + %lo(cpte - 8)			! enter the table
	 add	%o0, %g1, %o0				! advance buf ptr
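	/* Each CSUM_LASTCHUNK below expands to 6 instructions (24 bytes),
	 * i.e. 1.5 bytes of code per byte of data, so the entry point for
	 * %g1 remaining bytes is (cpte - 8) - %g1 - %g1/2, which is exactly
	 * what the three instructions above compute.
	 */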
cptbl:	CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
	CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
	addx	%g0, %o2, %o2				! fetch final carry
	andcc	%o1, 0xf, %g0				! anything left at all?
cpte:	bne	csum_partial_end_cruft			! yep, handle it
	 andcc	%o1, 8, %g0				! check how much
cpout:	retl						! get outta here
	 mov	%o2, %o0				! return computed csum

	.globl __csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:

#define ALLOC #alloc
#define EXECINSTR #execinstr
#define EX(x,y,a,b)				\
98:     x,y;                                    \
        .section .fixup,ALLOC,EXECINSTR;	\
        .align  4;                              \
99:     ba 30f;                                 \
         a, b, %o3;                             \
        .section __ex_table,ALLOC;		\
        .align  4;                              \
        .word   98b, 99b;                       \
        .text;                                  \
        .align  4

#define EX2(x,y)				\
98:     x,y;                                    \
        .section __ex_table,ALLOC;		\
        .align  4;                              \
        .word   98b, 30f;                       \
        .text;                                  \
        .align  4

#define EX3(x,y)				\
98:     x,y;                                    \
        .section __ex_table,ALLOC;		\
        .align  4;                              \
        .word   98b, 96f;                       \
        .text;                                  \
        .align  4

#define EXT(start,end,handler)			\
        .section __ex_table,ALLOC;		\
        .align  4;                              \
        .word   start, 0, end, handler;         \
        .text;                                  \
        .align  4

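/* A sketch of how these fixups cooperate (best-effort reading): EX() tags
 * a single access at the local label 98:, emits a fixup stub at 99: that
 * computes the bytes left into %o3 and branches to the common handler at
 * 30:, and records the 98b/99b pair in __ex_table.  EX2/EX3 skip the stub
 * and point the fault straight at 30f/96f.  EXT() instead records a whole
 * instruction range (start, end) plus a handler, letting one entry cover
 * an entire unrolled loop; the handlers at 20:, 21: and 22: below then
 * reconstruct the copy progress from the faulting PC.
 */
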
	/* This aligned version typically executes in 8.5 superscalar cycles;
	 * that is the best I can do.  I say 8.5 because the final add will
	 * pair with the next ldd in the main unrolled loop, so the pipe is
	 * always full.  If you change these macros (including the order of
	 * instructions), please check the fixup code below as well.
	 */
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;							\
	ldd	[src + off + 0x08], t2;							\
	addxcc	t0, sum, sum;								\
	ldd	[src + off + 0x10], t4;							\
	addxcc	t1, sum, sum;								\
	ldd	[src + off + 0x18], t6;							\
	addxcc	t2, sum, sum;								\
	std	t0, [dst + off + 0x00];							\
	addxcc	t3, sum, sum;								\
	std	t2, [dst + off + 0x08];							\
	addxcc	t4, sum, sum;								\
	std	t4, [dst + off + 0x10];							\
	addxcc	t5, sum, sum;								\
	std	t6, [dst + off + 0x18];							\
	addxcc	t6, sum, sum;								\
	addxcc	t7, sum, sum;
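
/* The aligned variant can use std to store register pairs because the
 * destination is known to be 8-byte aligned (see the ccdbl branch below);
 * the generic variant must fall back to word-sized st.
 */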

	/* 12 superscalar cycles seems to be the limit for this case,
	 * so we do all the ldd's together to get the Viking MXCC into
	 * streaming mode.  Ho hum...
	 */
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
	ldd	[src + off + 0x00], t0;						\
	ldd	[src + off + 0x08], t2;						\
	ldd	[src + off + 0x10], t4;						\
	ldd	[src + off + 0x18], t6;						\
	st	t0, [dst + off + 0x00];						\
	addxcc	t0, sum, sum;							\
	st	t1, [dst + off + 0x04];						\
	addxcc	t1, sum, sum;							\
	st	t2, [dst + off + 0x08];						\
	addxcc	t2, sum, sum;							\
	st	t3, [dst + off + 0x0c];						\
	addxcc	t3, sum, sum;							\
	st	t4, [dst + off + 0x10];						\
	addxcc	t4, sum, sum;							\
	st	t5, [dst + off + 0x14];						\
	addxcc	t5, sum, sum;							\
	st	t6, [dst + off + 0x18];						\
	addxcc	t6, sum, sum;							\
	st	t7, [dst + off + 0x1c];						\
	addxcc	t7, sum, sum;

	/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
	ldd	[src - off - 0x08], t0;				\
	ldd	[src - off - 0x00], t2;				\
	addxcc	t0, sum, sum;					\
	st	t0, [dst - off - 0x08];				\
	addxcc	t1, sum, sum;					\
	st	t1, [dst - off - 0x04];				\
	addxcc	t2, sum, sum;					\
	st	t2, [dst - off - 0x00];				\
	addxcc	t3, sum, sum;					\
	st	t3, [dst - off + 0x04];

	/* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
	be	1f
	 andcc	%o3, 4, %g0
	EX(ldd	[%o0 + 0x00], %g2, and %o3, 0xf)
	add	%o1, 8, %o1
	addcc	%g2, %g7, %g7
	add	%o0, 8, %o0
	addxcc	%g3, %g7, %g7
	EX2(st	%g2, [%o1 - 0x08])
	addx	%g0, %g7, %g7
	andcc	%o3, 4, %g0
	EX2(st	%g3, [%o1 - 0x04])
1:	be	1f
	 andcc	%o3, 3, %o3
	EX(ld	[%o0 + 0x00], %g2, add %o3, 4)
	add	%o1, 4, %o1
	addcc	%g2, %g7, %g7
	EX2(st	%g2, [%o1 - 0x04])
	addx	%g0, %g7, %g7
	andcc	%o3, 3, %g0
	add	%o0, 4, %o0
1:	be	1f
	 addcc	%o3, -1, %g0
	bne	2f
	 subcc	%o3, 2, %o3
	b	4f
	 or	%g0, %g0, %o4
2:	EX(lduh	[%o0 + 0x00], %o4, add %o3, 2)
	add	%o0, 2, %o0
	EX2(sth	%o4, [%o1 + 0x00])
	be	6f
	 add	%o1, 2, %o1
	sll	%o4, 16, %o4
4:	EX(ldub	[%o0 + 0x00], %o5, add %g0, 1)
	EX2(stb	%o5, [%o1 + 0x00])
	sll	%o5, 8, %o5
	or	%o5, %o4, %o4
6:	addcc	%o4, %g7, %g7
1:	retl
	 addx	%g0, %g7, %o0

	/* Also, handle the alignment code out of band. */
cc_dword_align:
	cmp	%g1, 6
	bl,a	ccte
	 andcc	%g1, 0xf, %o3
	andcc	%o0, 0x1, %g0
	bne	ccslow
	 andcc	%o0, 0x2, %g0
	be	1f
	 andcc	%o0, 0x4, %g0
	EX(lduh	[%o0 + 0x00], %g4, add %g1, 0)
	sub	%g1, 2, %g1
	EX2(sth	%g4, [%o1 + 0x00])
	add	%o0, 2, %o0
	sll	%g4, 16, %g4
	addcc	%g4, %g7, %g7
	add	%o1, 2, %o1
	srl	%g7, 16, %g3
	addx	%g0, %g3, %g4
	sll	%g7, 16, %g7
	sll	%g4, 16, %g3
	srl	%g7, 16, %g7
	andcc	%o0, 0x4, %g0
	or	%g3, %g7, %g7
1:	be	3f
	 andcc	%g1, 0xffffff80, %g0
	EX(ld	[%o0 + 0x00], %g4, add %g1, 0)
	sub	%g1, 4, %g1
	EX2(st	%g4, [%o1 + 0x00])
	add	%o0, 4, %o0
	addcc	%g4, %g7, %g7
	add	%o1, 4, %o1
	addx	%g0, %g7, %g7
	b	3f
	 andcc	%g1, 0xffffff80, %g0

	/* Sun, you just can't beat me, you just can't.  Stop trying,
	 * give up.  I'm serious, I am going to kick the living shit
	 * out of you, game over, lights out.
	 */
	.align	8
	.globl	__csum_partial_copy_sparc_generic
__csum_partial_copy_sparc_generic:
					/* %o0=src, %o1=dest, %g1=len, %g7=sum */
	xor	%o0, %o1, %o4		! get changing bits
	andcc	%o4, 3, %g0		! check for mismatched alignment
	bne	ccslow			! better this than unaligned/fixups
	 andcc	%o0, 7, %g0		! need to align things?
	bne	cc_dword_align		! yes, we check for short lengths there
	 andcc	%g1, 0xffffff80, %g0	! can we use unrolled loop?
3:	be	3f			! nope, less than one loop remains
	 andcc	%o1, 4, %g0		! dest aligned on 4 or 8 byte boundary?
	be	ccdbl + 4		! 8 byte aligned, kick ass
5:	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
10:	EXT(5b, 10b, 20f)		! note for exception handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	5b			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
3:	andcc	%g1, 0x70, %o2		! can use table?
ccmerge:be	ccte			! nope, go and check for end cruft
	 andcc	%g1, 0xf, %o3		! get low bits of length (clears carry btw)
	srl	%o2, 1, %o4		! begin negative offset computation
	sethi	%hi(12f), %o5		! set up table ptr end
	add	%o0, %o2, %o0		! advance src ptr
	sub	%o5, %o4, %o5		! continue table calculation
	sll	%o2, 1, %g2		! constant multiplies are fun...
	sub	%o5, %g2, %o5		! some more adjustments
	jmp	%o5 + %lo(12f)		! jump into it, duff style, wheee...
	 add	%o1, %o2, %o1		! advance dest ptr (carry is clear btw)
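	/* Each CSUMCOPY_LASTCHUNK below is 10 instructions (40 bytes) per
	 * 0x10 bytes of data, i.e. 2.5x, so the sequence above computes the
	 * entry point as 12f - %o2/2 - 2*%o2 = 12f - 2.5*%o2.
	 */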
cctbl:	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
12:	EXT(cctbl, 12b, 22f)		! note for exception table handling
	addx	%g0, %g7, %g7
	andcc	%o3, 0xf, %g0		! check for low bits set
ccte:	bne	cc_end_cruft		! something left, handle it out of band
	 andcc	%o3, 8, %g0		! begin checks for that code
	retl				! return
	 mov	%g7, %o0		! give em the computed checksum
ccdbl:	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
11:	EXT(ccdbl, 11b, 21f)		! note for exception table handling
	sub	%g1, 128, %g1		! detract from length
	addx	%g0, %g7, %g7		! add in last carry bit
	andcc	%g1, 0xffffff80, %g0	! more to csum?
	add	%o0, 128, %o0		! advance src ptr
	bne	ccdbl			! we did not go negative, continue looping
	 add	%o1, 128, %o1		! advance dest ptr
	b	ccmerge			! finish it off, above
	 andcc	%g1, 0x70, %o2		! can use table? (clears carry btw)

ccslow:	cmp	%g1, 0
	mov	0, %g5
	bleu	4f
	 andcc	%o0, 1, %o5
	be,a	1f
	 srl	%g1, 1, %g4
	sub	%g1, 1, %g1
	EX(ldub	[%o0], %g5, add %g1, 1)
	add	%o0, 1, %o0
	EX2(stb	%g5, [%o1])
	srl	%g1, 1, %g4
	add	%o1, 1, %o1
1:	cmp	%g4, 0
	be,a	3f
	 andcc	%g1, 1, %g0
	andcc	%o0, 2, %g0
	be,a	1f
	 srl	%g4, 1, %g4
	EX(lduh	[%o0], %o4, add %g1, 0)
	sub	%g1, 2, %g1
	srl	%o4, 8, %g2
	sub	%g4, 1, %g4
	EX2(stb	%g2, [%o1])
	add	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 1])
	add	%o0, 2, %o0
	srl	%g4, 1, %g4
	add	%o1, 2, %o1
1:	cmp	%g4, 0
	be,a	2f
	 andcc	%g1, 2, %g0
	EX3(ld	[%o0], %o4)
5:	srl	%o4, 24, %g2
	srl	%o4, 16, %g3
	EX2(stb	%g2, [%o1])
	srl	%o4, 8, %g2
	EX2(stb	%g3, [%o1 + 1])
	add	%o0, 4, %o0
	EX2(stb	%g2, [%o1 + 2])
	addcc	%o4, %g5, %g5
	EX2(stb	%o4, [%o1 + 3])
	addx	%g5, %g0, %g5	! I am too lazy to optimize this now (and doubt
	add	%o1, 4, %o1	! it is worth it). Maybe some day, with the
	subcc	%g4, 1, %g4	! sll/srl tricks
	bne,a	5b
	 EX3(ld	[%o0], %o4)
	sll	%g5, 16, %g2
	srl	%g5, 16, %g5
	srl	%g2, 16, %g2
	andcc	%g1, 2, %g0
	add	%g2, %g5, %g5
2:	be,a	3f
	 andcc	%g1, 1, %g0
	EX(lduh	[%o0], %o4, and %g1, 3)
	andcc	%g1, 1, %g0
	srl	%o4, 8, %g2
	add	%o0, 2, %o0
	EX2(stb	%g2, [%o1])
	add	%g5, %o4, %g5
	EX2(stb	%o4, [%o1 + 1])
	add	%o1, 2, %o1
3:	be,a	1f
	 sll	%g5, 16, %o4
	EX(ldub	[%o0], %g2, add %g0, 1)
	sll	%g2, 8, %o4
	EX2(stb	%g2, [%o1])
	add	%g5, %o4, %g5
	sll	%g5, 16, %o4
1:	addcc	%o4, %g5, %g5
	srl	%g5, 16, %o4
	addx	%g0, %o4, %g5
	orcc	%o5, %g0, %g0
	be	4f
	 srl	%g5, 8, %o4
	and	%g5, 0xff, %g2
	and	%o4, 0xff, %o4
	sll	%g2, 8, %g2
	or	%g2, %o4, %g5
4:	addcc	%g7, %g5, %g7
	retl
	 addx	%g0, %g7, %o0
__csum_partial_copy_end:

/* We do these strange calculations for the csum_*_from_user case only, i.e.
 * we only bother with faults on loads... */

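/* In the three handlers below, %g2 arrives from the fault handler as the
 * instruction offset into the EXT-tagged block where the fault hit (a
 * best-effort reading of the fixup protocol above); from it they recover
 * %o2, the bytes that can still be copied from src to dst, and %o3, the
 * bytes left to zero out, before joining the common code at 31:.
 */
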
/* o2 = ((g2%20)&3)*8
 * o3 = g1 - (g2/20)*32 - o2 */
20:
	cmp	%g2, 20
	blu,a	1f
	 and	%g2, 3, %o2
	sub	%g1, 32, %g1
	b	20b
	 sub	%g2, 20, %g2
1:
	sll	%o2, 3, %o2
	b	31f
	 sub	%g1, %o2, %o3

/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
 * o3 = g1 - (g2/16)*32 - o2 */
21:
	andcc	%g2, 15, %o3
	srl	%g2, 4, %g2
	be,a	1f
	 clr	%o2
	add	%o3, 1, %o3
	and	%o3, 14, %o3
	sll	%o3, 3, %o2
1:
	sll	%g2, 5, %g2
	sub	%g1, %g2, %o3
	b	31f
	 sub	%o3, %o2, %o3

/* o0 += (g2/10)*16 - 0x70
 * o1 += (g2/10)*16 - 0x70
 * o2 = (g2 % 10) ? 8 : 0
 * o3 += 0x70 - (g2/10)*16 - o2 */
22:
	cmp	%g2, 10
	blu,a	1f
	 sub	%o0, 0x70, %o0
	add	%o0, 16, %o0
	add	%o1, 16, %o1
	sub	%o3, 16, %o3
	b	22b
	 sub	%g2, 10, %g2
1:
	sub	%o1, 0x70, %o1
	add	%o3, 0x70, %o3
	clr	%o2
	tst	%g2
	bne,a	1f
	 mov	8, %o2
1:
	b	31f
	 sub	%o3, %o2, %o3
96:
	and	%g1, 3, %g1
	sll	%g4, 2, %g4
	add	%g1, %g4, %o3
30:
/* %o1 is dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
	clr	%o2
31:
/* %o0 is src
 * %o1 is dst
 * %o2 is # of bytes to copy from src to dst
 * %o3 is # bytes to zero out
 * %o4 is faulting address
 * %o5 is %pc where fault occurred */
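/* Recovery flow (a sketch of our reading): lookup_fault() decides whether
 * this fault is fixable (the value 2 tested below appears to mean a fixable
 * user fault); if so, memcpy() the %o2 bytes known to be readable, then
 * __bzero() the %o3 bytes that could not be read, and finally report
 * -EFAULT through the struct_ptr of the parent frame at %sp + 168.
 */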
	save	%sp, -104, %sp
        mov     %i5, %o0
        mov     %i7, %o1
        mov	%i4, %o2
        call    lookup_fault
	 mov	%g7, %i4
	cmp	%o0, 2
	bne	1f
	 add	%g0, -EFAULT, %i5
	tst	%i2
	be	2f
	 mov	%i0, %o1
	mov	%i1, %o0
5:
	call	memcpy
	 mov	%i2, %o2
	tst	%o0
	bne,a	2f
	 add	%i3, %i2, %i3
	add	%i1, %i2, %i1
2:
	mov	%i1, %o0
6:
	call	__bzero
	 mov	%i3, %o1
1:
	ld	[%sp + 168], %o2		! struct_ptr of parent
	st	%i5, [%o2]
	ret
	 restore

        .section __ex_table,#alloc
        .align 4
        .word 5b,2
	.word 6b,2