# include "misc.h"
int
-b64_add(e1,e2)
- register struct _mantissa *e1,*e2;
+flt_b64_add(e1,e2)
+ register struct flt_mantissa *e1,*e2;
{
int overflow;
int carry;
#include "misc.h"
-b64_sft(e,n)
- register struct _mantissa *e;
+flt_b64_sft(e,n)
+ register struct flt_mantissa *e;
register int n;
{
if (n > 0) {
}
}
-b64_lsft(e)
- register struct _mantissa *e;
+flt_b64_lsft(e)
+ register struct flt_mantissa *e;
{
/* shift left 1 bit */
e->flt_h_32 = (e->flt_h_32 << 1) & 0xFFFFFFFF;
e->flt_l_32 = (e->flt_l_32 << 1) & 0xFFFFFFFF;
}
-b64_rsft(e)
- register struct _mantissa *e;
+flt_b64_rsft(e)
+ register struct flt_mantissa *e;
{
/* shift right 1 bit */
e->flt_l_32 = (e->flt_l_32 >> 1) & 0x7FFFFFFF;
/* Add two extended numbers e1 and e2, and put the result
in e3
*/
- flt_arith ce2;
+ flt_arith ce1, ce2;
int diff;
flt_status = 0;
return;
}
ce2 = *e2;
- *e3 = *e1;
- e1 = &ce2;
+ ce1 = *e1;
+ e1 = &ce1;
+ e2 = &ce2;
/* adjust mantissas to equal power */
- diff = e3->flt_exp - e1->flt_exp;
+ diff = e2->flt_exp - e1->flt_exp;
if (diff < 0) {
diff = -diff;
- e3->flt_exp += diff;
- b64_sft(&(e3->flt_mantissa), diff);
+ e2->flt_exp += diff;
+ flt_b64_sft(&(e2->flt_mantissa), diff);
}
else if (diff > 0) {
e1->flt_exp += diff;
- b64_sft(&(e1->flt_mantissa), diff);
+ flt_b64_sft(&(e1->flt_mantissa), diff);
}
- if (e1->flt_sign != e3->flt_sign) {
- /* e3 + e1 = e3 - (-e1) */
- int tmp = ucmp(e1->m1, e3->m1);
- int tmp2 = ucmp(e1->m2, e3->m2);
+ if (e1->flt_sign != e2->flt_sign) {
+ /* e2 + e1 = e2 - (-e1) */
+ int tmp = ucmp(e1->m1, e2->m1);
+ int tmp2 = ucmp(e1->m2, e2->m2);
if (tmp > 0 || (tmp == 0 && tmp2 > 0)) {
- /* abs(e1) > abs(e3) */
+ /* abs(e1) > abs(e2) */
if (tmp2 < 0) {
e1->m1 -= 1; /* carry in */
}
- e1->m1 -= e3->m1;
- e1->m2 -= e3->m2;
+ e1->m1 -= e2->m1;
+ e1->m2 -= e2->m2;
*e3 = *e1;
}
else {
if (tmp2 > 0)
- e3->m1 -= 1; /* carry in */
- e3->m1 -= e1->m1;
- e3->m2 -= e1->m2;
+ e2->m1 -= 1; /* carry in */
+ e2->m1 -= e1->m1;
+ e2->m2 -= e1->m2;
+ *e3 = *e2;
}
}
else {
- if (b64_add(&e3->flt_mantissa,&e1->flt_mantissa)) {/* addition carry */
- b64_sft(&e3->flt_mantissa,1);/* shift mantissa one bit RIGHT */
+ *e3 = *e2;
+ if (flt_b64_add(&e3->flt_mantissa,&e1->flt_mantissa)) {/* addition carry */
+ flt_b64_rsft(&e3->flt_mantissa);
e3->m1 |= 0x80000000L; /* set max bit */
e3->flt_exp++; /* increase the exponent */
}
flt_sub(e1,e2,e3)
flt_arith *e1,*e2,*e3;
{
- flt_arith ce2;
-
- ce2 = *e2;
- ce2.flt_sign = ! ce2.flt_sign;
- flt_add(e1,&ce2,e3);
+ e2->flt_sign = ! e2->flt_sign;
+ flt_add(e1,e2,e3);
+ e2->flt_sign = ! e2->flt_sign;
}
e->flt_exp++;
}
for (i = 64; i > 0 && n != 0; i--) {
- b64_rsft(&(e->flt_mantissa));
+ flt_b64_rsft(&(e->flt_mantissa));
e->m1 |= (n & 1) << 31;
n >>= 1;
}
if (i > 0) {
- b64_sft(&(e->flt_mantissa), i);
+ flt_b64_sft(&(e->flt_mantissa), i);
}
flt_status = 0;
flt_nrm(e);
.nf
.B #include <flt_arith.h>
.PP
+.ta 5m 30m
+struct flt_mantissa {
+ long flt_h_32; /* high order 32 bits of mantissa */
+ long flt_l_32; /* low order 32 bits of mantissa */
+};
+
+typedef struct {
+ short flt_sign; /* 0 for positive, 1 for negative */
+ short flt_exp; /* between -16384 and 16384 */
+ struct flt_mantissa flt_mantissa; /* normalized, in [1,2). */
+} flt_arith;
+
+extern int flt_status;
+#define FLT_OVFL 001
+#define FLT_UNFL 002
+#define FLT_DIV0 004
+#define FLT_NOFLT 010
+#define FLT_BTSM 020
+.PP
.B flt_add(e1, e2, e3)
.B flt_arith *e1, *e2, *e3;
.PP
.PP
.B arith flt_flt2arith(e)
.B flt_arith *e;
+.PP
+.B flt_b64_sft(m, n)
+.B struct flt_mantissa *m;
+.B int n;
+.PP
+.B flt_b64_rsft(m)
+.B struct flt_mantissa *m;
+.PP
+.B flt_b64_lsft(m)
+.B struct flt_mantissa *m;
.SH DESCRIPTION
This set of routines emulates floating point arithmetic, in a high
precision. It is intended primarily for compilers that need to evaluate
indicates that the buffer is too small. The contents of the buffer is
undefined. This can only occur with the routine
.IR flt_flt2str .
+.PP
+The routine
+.I flt_b64_sft
+shifts the mantissa
+.I m
+.I |n|
+bits left or right, depending on the sign of
+.IR n .
+If
+.I n
+is negative, it is a left shift; if
+.I n
+is positive, it is a right shift.
+.PP
+The routine
+.I flt_b64_rsft
+shifts the mantissa
+.I m
+1 bit right.
+.PP
+The routine
+.I flt_b64_lsft
+shifts the mantissa
+.I m
+1 bit left.
.SH FILES
~em/modules/h/flt_arith.h
.br
#ifndef __FLT_INCLUDED__
#define __FLT_INCLUDED__
-struct _mantissa {
- long flt_h_32;
- long flt_l_32;
+struct flt_mantissa {
+ long flt_h_32; /* high order 32 bits of mantissa */
+ long flt_l_32; /* low order 32 bits of mantissa */
};
-struct _EXTEND {
- short flt_sign;
- short flt_exp;
- struct _mantissa flt_mantissa;
-};
-
-typedef struct _EXTEND flt_arith;
+typedef struct {
+ short flt_sign; /* 0 for positive, 1 for negative */
+ short flt_exp; /* between -16384 and 16384 */
+ struct flt_mantissa flt_mantissa; /* normalized, in [1,2). */
+} flt_arith;
extern int flt_status;
#define FLT_OVFL 001
flt_div(e1,e2,e3)
register flt_arith *e1,*e2,*e3;
{
- short error = 0;
long result[2];
register long *lp;
unsigned short u[9], v[5];
e3->flt_exp = e1->flt_exp - e2->flt_exp + 2;
u[4] = (e1->m2 & 1) << 15;
- b64_rsft(&(e1->flt_mantissa));
+ flt_b64_rsft(&(e1->flt_mantissa));
u[0] = (e1->m1 >> 16) & 0xFFFF;
u[1] = e1->m1 & 0xFFFF;
u[2] = (e1->m2 >> 16) & 0xFFFF;
}
}
temp -= q_est * v[1];
- while (!(temp&0xFFFF0000) && ucmp(v[2]*q_est,((temp<<16)+u_p[2])) > 0) {
+ while (!(temp&0xFFFF0000) &&
+ ucmp((long)(v[2]*q_est),(long)((temp<<16)+u_p[2])) > 0) {
q_est--;
temp += v[1];
}
/* Convert the flt_arith "n" to an arith.
*/
arith n;
- struct _mantissa a;
+ struct flt_mantissa a;
flt_status = 0;
if (e->flt_exp < 0) {
}
a = e->flt_mantissa;
- b64_sft(&a, 63-e->flt_exp);
+ flt_b64_sft(&a, 63-e->flt_exp);
n = a.flt_l_32 | ((a.flt_h_32 << 16) << 16);
/* not << 32; this could be an undefined operation */
return n;
}
*ipart = *e;
/* "loose" low order bits */
- b64_sft(&(ipart->flt_mantissa), 63 - e->flt_exp);
- b64_sft(&(ipart->flt_mantissa), e->flt_exp - 63);
+ flt_b64_sft(&(ipart->flt_mantissa), 63 - e->flt_exp);
+ flt_b64_sft(&(ipart->flt_mantissa), e->flt_exp - 63);
flt_sub(e, ipart, fpart);
}
e3->m1 = ((long)result[0] << 16) + result[1];
e3->m2 = ((long)result[2] << 16) + result[3];
if (result[4] & 0x8000) {
- if (++e3->m2 == 0) {
- if (++e3->m1 == 0) {
+ if (++e3->m2 == 0 || (e3->m2 & ~ 0xFFFFFFFF)) {
+ e3->m2 = 0;
+ if (++e3->m1 == 0 || (e3->m1 & ~ 0xFFFFFFFF)) {
e3->m1 = 0x80000000;
e3->flt_exp++;
}
cnt--;
}
e->flt_exp += cnt;
- b64_sft(&(e->flt_mantissa), cnt);
+ flt_b64_sft(&(e->flt_mantissa), cnt);
}
}
/* $Header$ */
+#include <ctype.h>
#include "misc.h"
/* The following tables can be computed with the following bc(1)
{ 0, -1768, 0xD4EEC394, 0xD6258BF8 }
};
+#define BIGSZ (sizeof(big_10pow)/sizeof(big_10pow[0]))
+#define SMALLSZ (sizeof(s10pow)/sizeof(s10pow[0]))
+
static
add_exponent(e, exp)
register flt_arith *e;
flt_arith x;
if (neg) exp = -exp;
- divsz = exp / (sizeof(s10pow)/sizeof(s10pow[0]));
- modsz = exp % (sizeof(s10pow)/sizeof(s10pow[0]));
+ divsz = exp / SMALLSZ;
+ modsz = exp % SMALLSZ;
if (neg) {
flt_mul(e, &r_10pow[modsz], &x);
- while (divsz >= sizeof(r_big_10pow)/sizeof(r_big_10pow[0])) {
- flt_mul(&x, &r_big_10pow[sizeof(r_big_10pow)/sizeof(r_big_10pow[0])-1],&x);
- divsz -= sizeof(r_big_10pow)/sizeof(r_big_10pow[0])-1;
+ while (divsz >= BIGSZ) {
+ flt_mul(&x, &r_big_10pow[BIGSZ-1],&x);
+ divsz -= BIGSZ-1;
flt_chk(e);
if (flt_status != 0) return;
}
}
else {
flt_mul(e, &s10pow[modsz], &x);
- while (divsz >= sizeof(big_10pow)/sizeof(big_10pow[0])) {
- flt_mul(&x, &big_10pow[sizeof(big_10pow)/sizeof(big_10pow[0])-1],&x);
- divsz -= sizeof(big_10pow)/sizeof(big_10pow[0])-1;
+ while (divsz >= BIGSZ) {
+ flt_mul(&x, &big_10pow[BIGSZ-1],&x);
+ divsz -= BIGSZ-1;
flt_chk(e);
if (flt_status != 0) return;
}
if (c == '.') continue;
digitseen = 1;
if (e->m1 <= 0x7FFFFFFF/5) {
- struct _mantissa a1;
+ struct flt_mantissa a1;
a1 = e->flt_mantissa;
- b64_sft(&(e->flt_mantissa), -3);
- b64_sft(&a1, -1);
- b64_add(&(e->flt_mantissa), &a1);
+ flt_b64_sft(&(e->flt_mantissa), -3);
+ flt_b64_lsft(&a1);
+ flt_b64_add(&(e->flt_mantissa), &a1);
a1.flt_h_32 = 0;
a1.flt_l_32 = c - '0';
- b64_add(&(e->flt_mantissa), &a1);
+ flt_b64_add(&(e->flt_mantissa), &a1);
}
else exp++;
if (dotseen) exp--;
*decpt = 0;
if (e->m1 != 0) {
-#define BIGSZ (sizeof(big_10pow)/sizeof(big_10pow[0]))
-#define SMALLSZ (sizeof(s10pow)/sizeof(s10pow[0]))
register flt_arith *pp = &big_10pow[1];
while (flt_cmp(e, &big_10pow[BIGSZ-1]) >= 0) {
flt_mul(e,&r_big_10pow[pp-big_10pow],e);
*decpt += (pp - big_10pow)*SMALLSZ;
pp = &s10pow[1];
- while (pp < &s10pow[SMALLSZ] && cmp_ext(e, pp) >= 0) pp++;
+ while (pp < &s10pow[SMALLSZ] && flt_cmp(e, pp) >= 0) pp++;
pp--;
flt_mul(e, &r_10pow[pp-s10pow], e);
*decpt += pp - s10pow;
- if (cmp_ext(e, &s10pow[0]) < 0) {
+ if (flt_cmp(e, &s10pow[0]) < 0) {
while (flt_cmp(e, &r_big_10pow[BIGSZ-1]) < 0) {
flt_mul(e,&big_10pow[BIGSZ-1],e);
*decpt -= (BIGSZ-1)*SMALLSZ;
}
pp = &r_big_10pow[1];
- while(cmp_ext(e,pp) < 0) pp++;
+ while(flt_cmp(e,pp) < 0) pp++;
pp--;
flt_mul(e,&big_10pow[pp-r_big_10pow],e);
*decpt -= (pp - r_big_10pow)*SMALLSZ;
flt_mul(e, &s10pow[1], e);
(*decpt)--;
pp = &r_10pow[0];
- while(cmp_ext(e, pp) < 0) pp++;
+ while(flt_cmp(e, pp) < 0) pp++;
flt_mul(e, &s10pow[pp-r_10pow], e);
*decpt -= pp - r_10pow;
}
x.m2 = 0; x.flt_exp = e->flt_exp;
x.flt_sign = 1;
- x.m1 = e->m1>>(31-e->flt_exp);
+ x.m1 = (e->m1 >> 1) & 0x7FFFFFFF;
+ x.m1 = x.m1>>(30-e->flt_exp);
*p++ = (x.m1) + '0';
x.m1 = x.m1 << (31-e->flt_exp);
- add_ext(e, &x, e);
+ flt_add(e, &x, e);
}
else *p++ = '0';
flt_mul(e, &s10pow[1], e);
char *buf;
{
-#define NDIG 25
+#define NDIG 20
int sign, dp;
register int i;
register char *s1;
if ((e->m1 | e->m2) || e->flt_exp == EXT_MIN || e->flt_exp == EXT_MAX) {
--dp ;
}
- if (dp == 0) {
- *s = 0;
- return;
+ if (dp != 0) {
+ *s++ = 'e';
+ if ( dp<0 ) {
+ *s++ = '-' ; dp= -dp ;
+ } else {
+ *s++ = '+' ;
+ }
+ s1 = &Xbuf[NDIG+12];
+ *--s1 = '\0';
+ do {
+ *--s1 = dp % 10 + '0';
+ dp /= 10;
+ } while (dp != 0);
+ while (*s1) *s++ = *s1++;
}
- *s++ = 'e';
- if ( dp<0 ) {
- *s++ = '-' ; dp= -dp ;
- } else {
- *s++ = '+' ;
- }
- s1 = &Xbuf[NDIG+12];
- *--s1 = '\0';
- do {
- *--s1 = dp % 10 + '0';
- dp /= 10;
- } while (dp != 0);
- while (*s1) *s++ = *s1++;
*s++ = '\0';
if (s - Xbuf > bufsize) {
flt_status = FLT_BTSM;
#define ucmp flt__ucmp
#define flt_nrm flt__nrm
#define flt_chk flt__chk
-#define b64_add flt__64add
-#define b64_sft flt__64sft
-#define b64_rsft flt__64rsft
-#define b64_lsft flt__64lsft
+#define flt_b64_add flt__64add
{
if (l1 == l2) return 0;
if (l2 >= 0) {
- if (l1 > l2) return 1;
+ if (l1 > l2 || l1 < 0) return 1;
return -1;
}
if (l1 > 0 || l1 < l2) return -1;