/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _LIBKERN_QUAD_H_
#define _LIBKERN_QUAD_H_
/*
* Quad arithmetic.
*
* This library makes the following assumptions:
*
* - The type long long (aka quad_t) exists.
*
* - A quad variable is exactly twice as long as `long'.
*
* - The machine's arithmetic is two's complement.
*
* This library can provide 128-bit arithmetic on a machine with 128-bit
* quads and 64-bit longs, for instance, or 96-bit arithmetic on machines
* with 48-bit longs.
*/
/*
 * Kernel includes from the original source, kept for reference; this
 * standalone port uses <limits.h> and local typedefs instead.
 *
 * #include <sys/cdefs.h>
 * #include <sys/types.h>
 * #include <sys/limits.h>
 * #include <sys/syslimits.h>
 */
#include <limits.h>
typedef long long quad_t;
typedef unsigned long long u_quad_t;
typedef unsigned long u_long;
#ifndef CHAR_BIT
#define CHAR_BIT __CHAR_BIT__
#endif
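/*
 * Compile-time checks of the assumptions above (a sketch; _Static_assert
 * requires C11, so drop these on older compilers).
 */
_Static_assert(sizeof(quad_t) == 2 * sizeof(long),
    "quad_t must be exactly twice as wide as long");
_Static_assert((-1 & 3) == 3, "two's complement arithmetic is assumed");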
/*
 * Define the order of 32-bit words in 64-bit words.
 * This port assumes a little-endian target (x86).
 */
#define _QUAD_HIGHWORD 1
#define _QUAD_LOWWORD 0
/*
* Depending on the desired operation, we view a `long long' (aka quad_t) in
* one or more of the following formats.
*/
union uu {
quad_t q; /* as a (signed) quad */
u_quad_t uq; /* as an (unsigned) quad */
long sl[2]; /* as two signed longs */
u_long ul[2]; /* as two unsigned longs */
};
/*
* Define high and low longwords.
*/
#define H _QUAD_HIGHWORD
#define L _QUAD_LOWWORD
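/*
 * Example (little-endian, 32-bit longs): for union uu aa with
 * aa.q == 0x0123456789abcdefLL, aa.ul[L] == 0x89abcdef and
 * aa.ul[H] == 0x01234567.
 */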
/*
* Total number of bits in a quad_t and in the pieces that make it up.
* These are used for shifting, and also below for halfword extraction
* and assembly.
*/
#define QUAD_BITS (sizeof(quad_t) * CHAR_BIT)
#define LONG_BITS (sizeof(long) * CHAR_BIT)
#define HALF_BITS (sizeof(long) * CHAR_BIT / 2)
/*
* Extract high and low shortwords from longword, and move low shortword of
* longword to upper half of long, i.e., produce the upper longword of
* ((quad_t)(x) << (number_of_bits_in_long/2)). (`x' must actually be u_long.)
*
* These are used in the multiply code, to split a longword into upper
* and lower halves, and to reassemble a product as a quad_t, shifted left
* (sizeof(long)*CHAR_BIT/2).
*/
#define HHALF(x) ((x) >> HALF_BITS)
#define LHALF(x) ((x) & ((1 << HALF_BITS) - 1))
#define LHUP(x) ((x) << HALF_BITS)
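/*
 * Example, assuming 32-bit u_long (so HALF_BITS == 16):
 * HHALF(0xdeadbeef) == 0xdead, LHALF(0xdeadbeef) == 0xbeef, and
 * LHUP(0xbeef) == 0xbeef0000.
 */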
typedef unsigned int qshift_t;
quad_t __ashldi3(quad_t, qshift_t);
quad_t __ashrdi3(quad_t, qshift_t);
int __cmpdi2(quad_t a, quad_t b);
quad_t __divdi3(quad_t a, quad_t b);
quad_t __lshrdi3(quad_t, qshift_t);
quad_t __moddi3(quad_t a, quad_t b);
u_quad_t __qdivrem(u_quad_t u, u_quad_t v, u_quad_t *rem);
u_quad_t __udivdi3(u_quad_t a, u_quad_t b);
u_quad_t __umoddi3(u_quad_t a, u_quad_t b);
int __ucmpdi2(u_quad_t a, u_quad_t b);
quad_t __divmoddi4(quad_t a, quad_t b, quad_t *rem);
u_quad_t __udivmoddi4(u_quad_t a, u_quad_t b, u_quad_t *rem);
#endif /* !_LIBKERN_QUAD_H_ */
#if defined (_X86_) && !defined (__x86_64__)
/*
* Shift a (signed) quad value left (arithmetic shift left).
* This is the same as logical shift left!
*/
quad_t
__ashldi3(quad_t a, qshift_t shift)
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
aa.ul[H] = shift >= QUAD_BITS ? 0 :
aa.ul[L] << (shift - LONG_BITS);
aa.ul[L] = 0;
} else if (shift > 0) {
aa.ul[H] = (aa.ul[H] << shift) |
(aa.ul[L] >> (LONG_BITS - shift));
aa.ul[L] <<= shift;
}
return (aa.q);
}
/*
* Shift a (signed) quad value right (arithmetic shift right).
*/
quad_t
__ashrdi3(quad_t a, qshift_t shift)
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
long s;
/*
* Smear bits rightward using the machine's right-shift
* method, whether that is sign extension or zero fill,
* to get the `sign word' s. Note that shifting by
* LONG_BITS is undefined, so we shift (LONG_BITS-1),
* then 1 more, to get our answer.
*/
s = (aa.sl[H] >> (LONG_BITS - 1)) >> 1;
aa.ul[L] = shift >= QUAD_BITS ? s :
aa.sl[H] >> (shift - LONG_BITS);
aa.ul[H] = s;
} else if (shift > 0) {
aa.ul[L] = (aa.ul[L] >> shift) |
(aa.ul[H] << (LONG_BITS - shift));
aa.sl[H] >>= shift;
}
return (aa.q);
}
/*
* Return 0, 1, or 2 as a <, =, > b respectively.
 * Both a and b are considered signed; this means only the high word
 * is signed.
*/
int
__cmpdi2(quad_t a, quad_t b)
{
union uu aa, bb;
aa.q = a;
bb.q = b;
return (aa.sl[H] < bb.sl[H] ? 0 : aa.sl[H] > bb.sl[H] ? 2 :
aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
}
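/*
 * Usage note: the result is the usual negative/zero/positive comparator
 * convention shifted up by one, e.g. __cmpdi2(-3, 5) == 0 (less),
 * __cmpdi2(7, 7) == 1 (equal), __cmpdi2(5, -3) == 2 (greater).
 */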
/*
 * Divide two signed quads.
 *
 * XXX
 * If -1/2 should produce -1 on this machine, this code is wrong.
 */
quad_t
__divdi3(quad_t a, quad_t b)
{
u_quad_t ua, ub, uq;
int neg;
if (a < 0)
ua = -(u_quad_t)a, neg = 1;
else
ua = a, neg = 0;
if (b < 0)
ub = -(u_quad_t)b, neg ^= 1;
else
ub = b;
uq = __qdivrem(ua, ub, (u_quad_t *)0);
return (neg ? -uq : uq);
}
/*
* Shift an (unsigned) quad value right (logical shift right).
*/
quad_t
__lshrdi3(quad_t a, qshift_t shift)
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
aa.ul[L] = shift >= QUAD_BITS ? 0 :
aa.ul[H] >> (shift - LONG_BITS);
aa.ul[H] = 0;
} else if (shift > 0) {
aa.ul[L] = (aa.ul[L] >> shift) |
(aa.ul[H] << (LONG_BITS - shift));
aa.ul[H] >>= shift;
}
return (aa.q);
}
/*
* Return remainder after dividing two signed quads.
*
* XXX
* If -1/2 should produce -1 on this machine, this code is wrong.
*/
quad_t
__moddi3(quad_t a, quad_t b)
{
u_quad_t ua, ub, ur;
int neg;
if (a < 0)
ua = -(u_quad_t)a, neg = 1;
else
ua = a, neg = 0;
if (b < 0)
ub = -(u_quad_t)b;
else
ub = b;
(void)__qdivrem(ua, ub, &ur);
return (neg ? -ur : ur);
}
/*
* Multiprecision divide. This algorithm is from Knuth vol. 2 (2nd ed),
* section 4.3.1, pp. 257--259.
*/
#define B (1 << HALF_BITS) /* digit base */
/* Combine two `digits' to make a single two-digit number. */
#define COMBINE(a, b) (((u_long)(a) << HALF_BITS) | (b))
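/* Example: with HALF_BITS == 16, COMBINE(0xdead, 0xbeef) == 0xdeadbeef. */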
/* select a type for digits in base B: use unsigned short if they fit */
#if ULONG_MAX == 0xffffffff && USHRT_MAX >= 0xffff
typedef unsigned short digit;
#else
typedef u_long digit;
#endif
/*
* Shift p[0]..p[len] left `sh' bits, ignoring any bits that
* `fall out' the left (there never will be any such anyway).
* We may assume len >= 0. NOTE THAT THIS WRITES len+1 DIGITS.
*/
static void
__shl(register digit *p, register int len, register int sh)
{
register int i;
for (i = 0; i < len; i++)
p[i] = LHALF(p[i] << sh) | (p[i + 1] >> (HALF_BITS - sh));
p[i] = LHALF(p[i] << sh);
}
/*
* __qdivrem(u, v, rem) returns u/v and, optionally, sets *rem to u%v.
*
* We do this in base 2-sup-HALF_BITS, so that all intermediate products
* fit within u_long. As a consequence, the maximum length dividend and
* divisor are 4 `digits' in this base (they are shorter if they have
* leading zeros).
*/
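/*
 * Example, with HALF_BITS == 16 (so B == 0x10000): the dividend
 * uq == 0x0001000200030004 is broken into the four base-B digits
 * u[1..4] == 1, 2, 3, 4 below.
 */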
u_quad_t
__qdivrem(u_quad_t uq, u_quad_t vq, u_quad_t *arq)
{
union uu tmp;
digit *u, *v, *q;
register digit v1, v2;
u_long qhat, rhat, t;
int m, n, d, j, i;
digit uspace[5], vspace[5], qspace[5];
/*
* Take care of special cases: divide by zero, and u < v.
*/
if (vq == 0) {
/* divide by zero. */
static volatile const unsigned int zero = 0;
tmp.ul[H] = tmp.ul[L] = 1 / zero;
if (arq)
*arq = uq;
return (tmp.q);
}
if (uq < vq) {
if (arq)
*arq = uq;
return (0);
}
u = &uspace[0];
v = &vspace[0];
q = &qspace[0];
/*
* Break dividend and divisor into digits in base B, then
* count leading zeros to determine m and n. When done, we
* will have:
* u = (u[1]u[2]...u[m+n]) sub B
* v = (v[1]v[2]...v[n]) sub B
* v[1] != 0
* 1 < n <= 4 (if n = 1, we use a different division algorithm)
* m >= 0 (otherwise u < v, which we already checked)
* m + n = 4
* and thus
* m = 4 - n <= 2
*/
tmp.uq = uq;
u[0] = 0;
u[1] = HHALF(tmp.ul[H]);
u[2] = LHALF(tmp.ul[H]);
u[3] = HHALF(tmp.ul[L]);
u[4] = LHALF(tmp.ul[L]);
tmp.uq = vq;
v[1] = HHALF(tmp.ul[H]);
v[2] = LHALF(tmp.ul[H]);
v[3] = HHALF(tmp.ul[L]);
v[4] = LHALF(tmp.ul[L]);
for (n = 4; v[1] == 0; v++) {
if (--n == 1) {
u_long rbj; /* r*B+u[j] (not root boy jim) */
digit q1, q2, q3, q4;
/*
* Change of plan, per exercise 16.
* r = 0;
* for j = 1..4:
* q[j] = floor((r*B + u[j]) / v),
* r = (r*B + u[j]) % v;
* We unroll this completely here.
*/
t = v[2]; /* nonzero, by definition */
q1 = u[1] / t;
rbj = COMBINE(u[1] % t, u[2]);
q2 = rbj / t;
rbj = COMBINE(rbj % t, u[3]);
q3 = rbj / t;
rbj = COMBINE(rbj % t, u[4]);
q4 = rbj / t;
if (arq)
*arq = rbj % t;
tmp.ul[H] = COMBINE(q1, q2);
tmp.ul[L] = COMBINE(q3, q4);
return (tmp.q);
}
}
/*
* By adjusting q once we determine m, we can guarantee that
* there is a complete four-digit quotient at &qspace[1] when
* we finally stop.
*/
for (m = 4 - n; u[1] == 0; u++)
m--;
for (i = 4 - m; --i >= 0;)
q[i] = 0;
q += 4 - m;
/*
* Here we run Program D, translated from MIX to C and acquiring
* a few minor changes.
*
* D1: choose multiplier 1 << d to ensure v[1] >= B/2.
*/
d = 0;
for (t = v[1]; t < B / 2; t <<= 1)
d++;
if (d > 0) {
__shl(&u[0], m + n, d); /* u <<= d */
__shl(&v[1], n - 1, d); /* v <<= d */
}
/*
* D2: j = 0.
*/
j = 0;
v1 = v[1]; /* for D3 -- note that v[1..n] are constant */
v2 = v[2]; /* for D3 */
do {
register digit uj0, uj1, uj2;
/*
* D3: Calculate qhat (\^q, in TeX notation).
* Let qhat = min((u[j]*B + u[j+1])/v[1], B-1), and
* let rhat = (u[j]*B + u[j+1]) mod v[1].
* While rhat < B and v[2]*qhat > rhat*B+u[j+2],
* decrement qhat and increase rhat correspondingly.
* Note that if rhat >= B, v[2]*qhat < rhat*B.
*/
uj0 = u[j + 0]; /* for D3 only -- note that u[j+...] change */
uj1 = u[j + 1]; /* for D3 only */
uj2 = u[j + 2]; /* for D3 only */
if (uj0 == v1) {
qhat = B;
rhat = uj1;
goto qhat_too_big;
} else {
u_long nn = COMBINE(uj0, uj1);
qhat = nn / v1;
rhat = nn % v1;
}
while (v2 * qhat > COMBINE(rhat, uj2)) {
qhat_too_big:
qhat--;
if ((rhat += v1) >= B)
break;
}
/*
* D4: Multiply and subtract.
* The variable `t' holds any borrows across the loop.
* We split this up so that we do not require v[0] = 0,
* and to eliminate a final special case.
*/
for (t = 0, i = n; i > 0; i--) {
t = u[i + j] - v[i] * qhat - t;
u[i + j] = LHALF(t);
t = (B - HHALF(t)) & (B - 1);
}
t = u[j] - t;
u[j] = LHALF(t);
/*
* D5: test remainder.
* There is a borrow if and only if HHALF(t) is nonzero;
* in that (rare) case, qhat was too large (by exactly 1).
* Fix it by adding v[1..n] to u[j..j+n].
*/
if (HHALF(t)) {
qhat--;
for (t = 0, i = n; i > 0; i--) { /* D6: add back. */
t += u[i + j] + v[i];
u[i + j] = LHALF(t);
t = HHALF(t);
}
u[j] = LHALF(u[j] + t);
}
q[j] = qhat;
} while (++j <= m); /* D7: loop on j. */
/*
* If caller wants the remainder, we have to calculate it as
* u[m..m+n] >> d (this is at most n digits and thus fits in
* u[m+1..m+n], but we may need more source digits).
*/
if (arq) {
if (d) {
for (i = m + n; i > m; --i)
u[i] = (u[i] >> d) |
LHALF(u[i - 1] << (HALF_BITS - d));
u[i] = 0;
}
tmp.ul[H] = COMBINE(uspace[1], uspace[2]);
tmp.ul[L] = COMBINE(uspace[3], uspace[4]);
*arq = tmp.q;
}
tmp.ul[H] = COMBINE(qspace[1], qspace[2]);
tmp.ul[L] = COMBINE(qspace[3], qspace[4]);
return (tmp.q);
}
/*
* Return 0, 1, or 2 as a <, =, > b respectively.
 * Neither a nor b is considered signed.
*/
int
__ucmpdi2(u_quad_t a, u_quad_t b)
{
union uu aa, bb;
aa.uq = a;
bb.uq = b;
return (aa.ul[H] < bb.ul[H] ? 0 : aa.ul[H] > bb.ul[H] ? 2 :
aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
}
/*
* Divide two unsigned quads.
*/
u_quad_t
__udivdi3(u_quad_t a, u_quad_t b)
{
return (__qdivrem(a, b, (u_quad_t *)0));
}
/*
* Return remainder after dividing two unsigned quads.
*/
u_quad_t
__umoddi3(u_quad_t a, u_quad_t b)
{
u_quad_t r;
(void)__qdivrem(a, b, &r);
return (r);
}
/*
 * Divide two signed quads and store the remainder through *rem.
 * GCC 7 and later may emit calls to this function.
 */
quad_t
__divmoddi4(quad_t a, quad_t b, quad_t *rem)
{
u_quad_t ua, ub, uq, ur;
int negq, negr;
if (a < 0)
ua = -(u_quad_t)a, negq = 1, negr = 1;
else
ua = a, negq = 0, negr = 0;
if (b < 0)
ub = -(u_quad_t)b, negq ^= 1;
else
ub = b;
uq = __qdivrem(ua, ub, &ur);
if (rem)
*rem = (negr ? -ur : ur);
return (negq ? -uq : uq);
}
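/*
 * Divide two unsigned quads and store the remainder through *rem.
 */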
u_quad_t
__udivmoddi4(u_quad_t a, u_quad_t b, u_quad_t *rem)
{
return __qdivrem(a, b, rem);
}
#else
static int __attribute__((unused)) dummy; /* avoid an empty translation unit */
#endif /* defined(_X86_) && !defined(__x86_64__) */
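#if defined(QUAD_SELFTEST) && defined(_X86_) && !defined(__x86_64__)
/*
 * A minimal self-test sketch, not part of the original library: the
 * QUAD_SELFTEST guard is a hypothetical build flag for this file only.
 * Build with -DQUAD_SELFTEST on a 32-bit x86 target and run. Expected
 * values follow directly from the routines above: division truncates
 * toward zero and the remainder keeps the sign of the dividend.
 */
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	u_quad_t ur;
	quad_t r;

	assert(__ashldi3(1, 40) == (quad_t)1 << 40);
	assert(__lshrdi3((quad_t)1 << 40, 8) == (quad_t)1 << 32);
	assert(__qdivrem(100, 7, &ur) == 14 && ur == 2);
	assert(__divmoddi4(-7, 2, &r) == -3 && r == -1);
	printf("quad self-test passed\n");
	return (0);
}
#endif /* QUAD_SELFTEST */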