In C remquol() and thus also in C remainderl(), don't clobber the sign bit
of NaNs before possibly returning a NaN.

The remquo*() and remainder*() functions should now give bitwise identical
results across arches and implementations, and bitwise consistent results
(with lower precisions having truncated mantissas) across precisions.  x86
already had consistency across amd64, i386 and all precisions by using the
i387 consistently and normally not using the C versions.  Inconsistencies
for C remquol() were first detected on sparc64.

Remove a redundant second clearing of the sign bit and extra blank lines.
Bruce Evans 2018-07-24 11:50:05 +00:00
parent 33683c3d3c
commit 2011986f09
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=336665
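
The sketch below (an editor's illustration, not part of this commit) exercises
the NaN path the message refers to: remquol() with a negative NaN argument can
return a NaN from its early exception-value check, and before this change x had
already been replaced by |x| at that point.  NaN sign propagation is
implementation-defined, so the program only reports what a given arch/libm
does; the arguments -nanl("") and 1.0L are arbitrary.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	int q;
	long double r;

	/* Negative quiet NaN; negation only flips the sign bit on IEEE arches. */
	r = remquol(-nanl(""), 1.0L, &q);

	/* Report the sign bit of the NaN that this arch's remquol() returns. */
	printf("signbit(remquol(-NaN, 1, &q)) = %d\n", signbit(r) != 0);
	return (0);
}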

@@ -79,7 +79,6 @@ remquol(long double x, long double y, int *quo)
 	sxy = sx ^ uy.bits.sign;
 	ux.bits.sign = 0;	/* |x| */
 	uy.bits.sign = 0;	/* |y| */
-	x = ux.e;
 
 	/* purge off exception values */
 	if((uy.bits.exp|uy.bits.manh|uy.bits.manl)==0 || /* y=0 */
@@ -126,7 +125,6 @@ remquol(long double x, long double y, int *quo)
     /* fix point fmod */
 	n = ix - iy;
 	q = 0;
-
 	while(n--) {
 		hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
 		if(hz<0){hx = hx+hx+(lx>>MANL_SHIFT); lx = lx+lx;}
@@ -154,9 +152,8 @@ remquol(long double x, long double y, int *quo)
 	} else {
 		ux.bits.exp = iy + BIAS;
 	}
-	ux.bits.sign = 0;
-	x = ux.e;
 fixup:
+	x = ux.e;		/* |x| */
 	y = fabsl(y);
 	if (y < LDBL_MIN * 2) {
 		if (x+x>y || (x+x==y && (q & 1))) {
@@ -167,11 +164,9 @@ remquol(long double x, long double y, int *quo)
 		q++;
 		x-=y;
 	}
-
 	ux.e = x;
 	ux.bits.sign ^= sx;
 	x = ux.e;
-
 	q &= 0x7fffffff;
 	*quo = (sxy ? -q : q);
 	return x;
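
The cross-precision consistency claim can be spot-checked along these lines
(again an editor's illustration, not from the commit): the IEEE remainder of
two finite doubles is always exact, so for double inputs remquo() and
remquol() should return equal values and matching low quotient bits.  The
hex-float inputs below are arbitrary; a real test would sweep many pairs.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double x = 0x1.921fb54442d18p+1;	/* arbitrary finite inputs */
	double y = 0x1p-3;
	double rd;
	long double rl;
	int qd, ql;

	rd = remquo(x, y, &qd);
	rl = remquol((long double)x, (long double)y, &ql);

	/*
	 * The remainder is exact, so the values should be equal and the
	 * 3 low bits of the reported quotients should agree.
	 */
	printf("double:      %a  quo&7 = %d\n", rd, qd & 7);
	printf("long double: %La  quo&7 = %d\n", rl, ql & 7);
	printf("consistent:  %d\n", (long double)rd == rl && (qd & 7) == (ql & 7));
	return (0);
}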