Grow internal expr int representation to 64 bits

This is necessary to retain precision above 32 bits, but more importantly to prevent an expression like (0x80000000 | 1) from flipping the sign of the result.
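To illustrate the point (a minimal standalone sketch, not the library's code): expr keeps operands as doubles and narrows them to an integer for bitwise operators, and 0x80000000 is one past INT_MAX, so a 32-bit narrowing overflows and the OR comes out negative, while a 64-bit narrowing keeps the value and the sign intact.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    double x = (double)0x80000000u; /* 2147483648.0, as expr would hold it */

    /* Old behaviour: the value does not fit in a 32-bit int, so the conversion
       overflows (undefined in ISO C; in practice it usually lands on INT_MIN)
       and the bitwise OR produces a negative result. */
    int32_t narrow = (int32_t)x;
    printf("32-bit: %d\n", (int)(narrow | 1));       /* typically -2147483647 */

    /* New behaviour: the value fits in 64 bits, so the sign is preserved. */
    int64_t wide = (int64_t)x;
    printf("64-bit: %lld\n", (long long)(wide | 1)); /* 2147483649 */
    return 0;
}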
smurf3tte 2020-12-19 12:49:19 -08:00 committed by TryTwo
parent 11851a01e6
commit 3417920167


@@ -14,6 +14,7 @@ extern "C" {
 #include <ctype.h> /* for isspace */
 #include <limits.h>
 #include <math.h> /* for pow */
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -288,13 +289,13 @@ static struct expr_var *expr_get_var(struct expr_var_list *vars, const char *s,
   return v;
 }
 
-static int to_int(double x) {
+static int64_t to_int(double x) {
   if (isnan(x)) {
     return 0;
   } else if (isinf(x) != 0) {
-    return INT_MAX * isinf(x);
+    return INT64_MAX * isinf(x);
   } else {
-    return (int)x;
+    return (int64_t)x;
   }
 }
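For reference, a standalone copy of the patched helper showing how it behaves at the edges. The function body matches the diff above; the note about isinf() returning the sign of the infinity (+1/-1) describes common libc behaviour (e.g. glibc), which the existing code already relies on, while ISO C only promises a nonzero value.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the patched helper, pulled out of expr.h for a quick demonstration. */
static int64_t to_int(double x) {
    if (isnan(x)) {
        return 0;
    } else if (isinf(x) != 0) {
        return INT64_MAX * isinf(x);
    } else {
        return (int64_t)x;
    }
}

int main(void) {
    printf("%lld\n", (long long)to_int(NAN));          /* 0 */
    printf("%lld\n", (long long)to_int(INFINITY));     /* INT64_MAX with a sign-returning isinf */
    printf("%lld\n", (long long)to_int(4294967296.0)); /* 4294967296: representable now that the type is 64-bit */
    return 0;
}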