/*
 *  linux/arch/arm/lib/div64.S
 *
 *  Optimized computation of 64-bit dividend / 32-bit divisor
 *
 *  Author:	Nicolas Pitre
 *  Created:	Oct 5, 2003
 *  Copyright:	Monta Vista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/unwind.h>

@ Register aliases for the two 64-bit quantities handled below.
@ Which physical register holds the high vs. low half of each pair
@ depends on the target byte order, so hide that behind xh/xl and
@ yh/yl names and write the code once.
#ifdef __ARMEB__
#define xh r0
#define xl r1
#define yh r2
#define yl r3
#else
#define xl r0
#define xh r1
#define yl r2
#define yh r3
#endif

/*
 * __do_div64: perform a division with 64-bit dividend and 32-bit divisor.
 *
 * Note: Calling convention is totally non standard for optimal code.
 *       This is meant to be used by do_div() from include/asm/div64.h only.
 *
 * Input parameters:
 * 	xh-xl	= dividend (clobbered)
 * 	r4	= divisor (preserved)
 *
 * Output values:
 * 	yh-yl	= result
 * 	xh	= remainder
 *
 * Clobbered regs: xl, ip
 */

ENTRY(__do_div64)
UNWIND(.fnstart)

	@ Test for easy paths first.
	subs	ip, r4, #1		@ eq if divisor == 1, borrow if == 0
	bls	9f			@ divisor is 0 or 1
	tst	ip, r4			@ (divisor - 1) & divisor == 0 ...
	beq	8f			@ ... iff divisor is power of 2

	@ See if we need to handle upper 32-bit result.
	cmp	xh, r4
	mov	yh, #0
	blo	3f

	@ Align divisor with upper part of dividend.
	@ The aligned divisor is stored in yl preserving the original.
	@ The bit position is stored in ip.

#if __LINUX_ARM_ARCH__ >= 5

	clz	yl, r4
	clz	ip, xh
	sub	yl, yl, ip		@ shift needed to align divisor with xh
	mov	ip, #1
	mov	ip, ip, lsl yl		@ ip = quotient bit being computed
	mov	yl, r4, lsl yl		@ yl = aligned divisor

#else

	@ No clz before ARMv5: align by shifting one bit at a time.
	mov	yl, r4
	mov	ip, #1
1:	cmp	yl, #0x80000000		@ stop before the aligned divisor
	cmpcc	yl, xh			@ ... or once it covers xh
	movcc	yl, yl, lsl #1
	movcc	ip, ip, lsl #1
	bcc	1b

#endif

	@ The division loop for needed upper bit positions.
	@ Break out early if dividend reaches 0.
2:	cmp	xh, yl
	orrcs	yh, yh, ip		@ set quotient bit if divisor fits
	subcss	xh, xh, yl		@ ... and subtract it from the dividend
	movnes	ip, ip, lsr #1		@ next bit position (skip if xh == 0)
	mov	yl, yl, lsr #1
	bne	2b

	@ See if we need to handle lower 32-bit result.
3:	cmp	xh, #0
	mov	yl, #0
	cmpeq	xl, r4
	movlo	xh, xl			@ remainder is just xl: done
	movlo	pc, lr

	@ The division loop for lower bit positions.
	@ Here we shift remainder bits leftwards rather than moving the
	@ divisor for comparisons, considering the carry-out bit as well.
	mov	ip, #0x80000000
4:	movs	xl, xl, lsl #1		@ shift 64-bit remainder left ...
	adcs	xh, xh, xh		@ ... carrying into the top half
	beq	6f			@ top half became zero
	cmpcc	xh, r4			@ only compare if no carry-out (33rd bit)
5:	orrcs	yl, yl, ip		@ set quotient bit if divisor fits
	subcs	xh, xh, r4
	movs	ip, ip, lsr #1		@ next bit position
	bne	4b
	mov	pc, lr

	@ The top part of remainder became zero.  If carry is set
	@ (the 33rd bit) this is a false positive so resume the loop.
	@ Otherwise, if lower part is also null then we are done.
6:	bcs	5b
	cmp	xl, #0
	moveq	pc, lr

	@ We still have remainder bits in the low part.  Bring them up.

#if __LINUX_ARM_ARCH__ >= 5

	clz	xh, xl			@ we know xh is zero here so...
	add	xh, xh, #1		@ ... count the shift past the top bit
	mov	xl, xl, lsl xh
	mov	ip, ip, lsr xh		@ skip the same number of quotient bits

#else

7:	movs	xl, xl, lsl #1		@ shift until a bit pops out the top
	mov	ip, ip, lsr #1
	bcc	7b

#endif

	@ Current remainder is now 1.  It is worthless to compare with
	@ divisor at this point since divisor can not be smaller than 3 here.
	@ If possible, branch for another shift in the division loop.
	@ If no bit position left then we are done.
	movs	ip, ip, lsr #1
	mov	xh, #1
	bne	4b
	mov	pc, lr

8:	@ Division by a power of 2: determine what that divisor order is
	@ then simply shift values around

#if __LINUX_ARM_ARCH__ >= 5

	clz	ip, r4
	rsb	ip, ip, #31		@ ip = log2(divisor)

#else

	@ Binary search for the position of the single set bit.
	mov	yl, r4
	cmp	r4, #(1 << 16)
	mov	ip, #0
	movhs	yl, yl, lsr #16
	movhs	ip, #16

	cmp	yl, #(1 << 8)
	movhs	yl, yl, lsr #8
	addhs	ip, ip, #8

	cmp	yl, #(1 << 4)
	movhs	yl, yl, lsr #4
	addhs	ip, ip, #4

	cmp	yl, #(1 << 2)
	addhi	ip, ip, #3
	addls	ip, ip, yl, lsr #1

#endif

	@ Quotient = dividend >> log2(divisor); remainder = the shifted-out bits.
	mov	yh, xh, lsr ip
	mov	yl, xl, lsr ip
	rsb	ip, ip, #32
 ARM(	orr	yl, yl, xh, lsl ip	)
 THUMB(	lsl	xh, xh, ip		)
 THUMB(	orr	yl, yl, xh		)
	mov	xh, xl, lsl ip		@ isolate remainder bits in xh ...
	mov	xh, xh, lsr ip		@ ... by shifting them back down
	mov	pc, lr

	@ eq -> division by 1: obvious enough...
	@ (divisor 0 is ne here and falls through to Ldiv0_64 below)
9:	moveq	yl, xl
	moveq	yh, xh
	moveq	xh, #0
	moveq	pc, lr
UNWIND(.fnend)

UNWIND(.fnstart)
UNWIND(.pad #4)
UNWIND(.save {lr})
Ldiv0_64:
	@ Division by 0:
	str	lr, [sp, #-8]!		@ push lr, keep sp 8-byte aligned
	bl	__div0

	@ as wrong as it could be...
	mov	yl, #0
	mov	yh, #0
	mov	xh, #0
	ldr	pc, [sp], #8

UNWIND(.fnend)
ENDPROC(__do_div64)
212