1/*===-- umodsi3.S - 32-bit unsigned integer modulus -----------------------===//
2 *
3 *                     The LLVM Compiler Infrastructure
4 *
5 * This file is dual licensed under the MIT and the University of Illinois Open
6 * Source Licenses. See LICENSE.TXT for details.
7 *
8 *===----------------------------------------------------------------------===//
9 *
10 * This file implements the __umodsi3 (32-bit unsigned integer modulus)
11 * function for the ARM 32-bit architecture.
12 *
13 *===----------------------------------------------------------------------===*/
14
15#include "../assembly.h"
16
17	.syntax unified
18	.text
19
@ unsigned int __umodsi3(unsigned int dividend, unsigned int divisor)
@   Calculate and return the remainder of the (unsigned) division.
22
23	.p2align 2
24DEFINE_COMPILERRT_FUNCTION(__umodsi3)
25#if __ARM_ARCH_EXT_IDIV__
26	tst     r1, r1
27	beq     LOCAL_LABEL(divby0)
28	udiv	r2, r0, r1
29	mls 	r0, r2, r1, r0
30	bx  	lr
31#else
32	cmp	r1, #1
33	bcc	LOCAL_LABEL(divby0)
34	ITT(eq)
35	moveq	r0, #0
36	JMPc(lr, eq)
37	cmp	r0, r1
38	IT(cc)
39	JMPc(lr, cc)
40	/*
41	 * Implement division using binary long division algorithm.
42	 *
43	 * r0 is the numerator, r1 the denominator.
44	 *
45	 * The code before JMP computes the correct shift I, so that
46	 * r0 and (r1 << I) have the highest bit set in the same position.
47	 * At the time of JMP, ip := .Ldiv0block - 8 * I.
48	 * This depends on the fixed instruction size of block.
49	 * For ARM mode, this is 8 Bytes, for THUMB mode 10 Bytes.
50	 *
51	 * block(shift) implements the test-and-update-quotient core.
52	 * It assumes (r0 << shift) can be computed without overflow and
53	 * that (r0 << shift) < 2 * r1. The quotient is stored in r3.
54	 */
55
56#  ifdef __ARM_FEATURE_CLZ
57	clz	ip, r0
58	clz	r3, r1
59	/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
60	sub	r3, r3, ip
61	adr	ip, LOCAL_LABEL(div0block)
62	sub	ip, ip, r3, lsl #3
63	bx	ip
64#  else
65	mov	r2, r0
66	adr	ip, LOCAL_LABEL(div0block)
67
68	lsr	r3, r2, #16
69	cmp	r3, r1
70	movhs	r2, r3
71	subhs	ip, ip, #(16 * 8)
72
73	lsr	r3, r2, #8
74	cmp	r3, r1
75	movhs	r2, r3
76	subhs	ip, ip, #(8 * 8)
77
78	lsr	r3, r2, #4
79	cmp	r3, r1
80	movhs	r2, r3
81	subhs	ip, #(4 * 8)
82
83	lsr	r3, r2, #2
84	cmp	r3, r1
85	movhs	r2, r3
86	subhs	ip, ip, #(2 * 8)
87
88	/* Last block, no need to update r2 or r3. */
89	cmp	r1, r2, lsr #1
90	subls	ip, ip, #(1 * 8)
91
92	JMP(ip)
93#  endif
94
95#define	IMM	#
96
97#define block(shift)                                                           \
98	cmp	r0, r1, lsl IMM shift;                                         \
99	IT(hs);                                                                \
100	WIDE(subhs)	r0, r0, r1, lsl IMM shift
101
102	block(31)
103	block(30)
104	block(29)
105	block(28)
106	block(27)
107	block(26)
108	block(25)
109	block(24)
110	block(23)
111	block(22)
112	block(21)
113	block(20)
114	block(19)
115	block(18)
116	block(17)
117	block(16)
118	block(15)
119	block(14)
120	block(13)
121	block(12)
122	block(11)
123	block(10)
124	block(9)
125	block(8)
126	block(7)
127	block(6)
128	block(5)
129	block(4)
130	block(3)
131	block(2)
132	block(1)
133LOCAL_LABEL(div0block):
134	block(0)
135	JMP(lr)
136#endif /* __ARM_ARCH_EXT_IDIV__ */
137
138LOCAL_LABEL(divby0):
139	mov	r0, #0
140#ifdef __ARM_EABI__
141	b	__aeabi_idiv0
142#else
143	JMP(lr)
144#endif
145
146END_COMPILERRT_FUNCTION(__umodsi3)
147