diff --git a/libm/Android.mk b/libm/Android.mk index 0f63d7b91..d4cd7c3e8 100644 --- a/libm/Android.mk +++ b/libm/Android.mk @@ -18,6 +18,8 @@ libm_common_src_files += \ upstream-freebsd/lib/msun/bsdsrc/b_exp.c \ upstream-freebsd/lib/msun/bsdsrc/b_log.c \ upstream-freebsd/lib/msun/bsdsrc/b_tgamma.c \ + upstream-freebsd/lib/msun/src/catrig.c \ + upstream-freebsd/lib/msun/src/catrigf.c \ upstream-freebsd/lib/msun/src/e_acos.c \ upstream-freebsd/lib/msun/src/e_acosf.c \ upstream-freebsd/lib/msun/src/e_acosh.c \ @@ -84,6 +86,7 @@ libm_common_src_files += \ upstream-freebsd/lib/msun/src/s_atanf.c \ upstream-freebsd/lib/msun/src/s_carg.c \ upstream-freebsd/lib/msun/src/s_cargf.c \ + upstream-freebsd/lib/msun/src/s_cargl.c \ upstream-freebsd/lib/msun/src/s_cbrt.c \ upstream-freebsd/lib/msun/src/s_cbrtf.c \ upstream-freebsd/lib/msun/src/s_ccosh.c \ @@ -94,20 +97,25 @@ libm_common_src_files += \ upstream-freebsd/lib/msun/src/s_cexpf.c \ upstream-freebsd/lib/msun/src/s_cimag.c \ upstream-freebsd/lib/msun/src/s_cimagf.c \ + upstream-freebsd/lib/msun/src/s_cimagl.c \ upstream-freebsd/lib/msun/src/s_conj.c \ upstream-freebsd/lib/msun/src/s_conjf.c \ + upstream-freebsd/lib/msun/src/s_conjl.c \ upstream-freebsd/lib/msun/src/s_copysign.c \ upstream-freebsd/lib/msun/src/s_copysignf.c \ upstream-freebsd/lib/msun/src/s_cos.c \ upstream-freebsd/lib/msun/src/s_cosf.c \ upstream-freebsd/lib/msun/src/s_cproj.c \ upstream-freebsd/lib/msun/src/s_cprojf.c \ + upstream-freebsd/lib/msun/src/s_cprojl.c \ upstream-freebsd/lib/msun/src/s_creal.c \ upstream-freebsd/lib/msun/src/s_crealf.c \ + upstream-freebsd/lib/msun/src/s_creall.c \ upstream-freebsd/lib/msun/src/s_csinh.c \ upstream-freebsd/lib/msun/src/s_csinhf.c \ upstream-freebsd/lib/msun/src/s_csqrt.c \ upstream-freebsd/lib/msun/src/s_csqrtf.c \ + upstream-freebsd/lib/msun/src/s_csqrtl.c \ upstream-freebsd/lib/msun/src/s_ctanh.c \ upstream-freebsd/lib/msun/src/s_ctanhf.c \ upstream-freebsd/lib/msun/src/s_erf.c \ @@ -174,6 +182,7 @@ libm_common_src_files += \ upstream-freebsd/lib/msun/src/s_truncf.c \ upstream-freebsd/lib/msun/src/w_cabs.c \ upstream-freebsd/lib/msun/src/w_cabsf.c \ + upstream-freebsd/lib/msun/src/w_cabsl.c \ upstream-freebsd/lib/msun/src/w_drem.c \ upstream-freebsd/lib/msun/src/w_dremf.c \ @@ -181,7 +190,7 @@ libm_common_src_files += \ fake_long_double.c \ signbit.c \ -libm_ld_src_files = \ +libm_ld128_src_files = \ upstream-freebsd/lib/msun/src/e_acosl.c \ upstream-freebsd/lib/msun/src/e_acoshl.c \ upstream-freebsd/lib/msun/src/e_asinl.c \ @@ -225,7 +234,7 @@ libm_ld_src_files = \ upstream-freebsd/lib/msun/src/s_tanl.c \ upstream-freebsd/lib/msun/src/s_truncl.c \ -libm_ld_src_files += \ +libm_ld128_src_files += \ upstream-freebsd/lib/msun/ld128/invtrig.c \ upstream-freebsd/lib/msun/ld128/e_lgammal_r.c \ upstream-freebsd/lib/msun/ld128/k_cosl.c \ @@ -282,18 +291,18 @@ LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/arm LOCAL_SRC_FILES_arm := arm/fenv.c LOCAL_C_INCLUDES_arm64 := $(libm_ld_includes) -LOCAL_SRC_FILES_arm64 := arm64/fenv.c $(libm_ld_src_files) +LOCAL_SRC_FILES_arm64 := arm64/fenv.c $(libm_ld128_src_files) LOCAL_C_INCLUDES_x86 := $(LOCAL_PATH)/i387 LOCAL_SRC_FILES_x86 := i387/fenv.c LOCAL_C_INCLUDES_x86_64 := $(libm_ld_includes) -LOCAL_SRC_FILES_x86_64 := amd64/fenv.c $(libm_ld_src_files) +LOCAL_SRC_FILES_x86_64 := amd64/fenv.c $(libm_ld128_src_files) LOCAL_SRC_FILES_mips := mips/fenv.c LOCAL_C_INCLUDES_mips64 := $(libm_ld_includes) -LOCAL_SRC_FILES_mips64 := mips/fenv.c $(libm_ld_src_files) +LOCAL_SRC_FILES_mips64 := mips/fenv.c 
$(libm_ld128_src_files) LOCAL_CXX_STL := none include $(BUILD_STATIC_LIBRARY) diff --git a/libm/include/complex.h b/libm/include/complex.h index 0702541c4..ff6b166f4 100644 --- a/libm/include/complex.h +++ b/libm/include/complex.h @@ -46,14 +46,39 @@ _Static_assert(__generic(_Complex_I, float _Complex, 1, 0), #define complex _Complex #define I _Complex_I +#if __ISO_C_VISIBLE >= 2011 +#ifdef __clang__ +#define CMPLX(x, y) ((double complex){ x, y }) +#define CMPLXF(x, y) ((float complex){ x, y }) +#define CMPLXL(x, y) ((long double complex){ x, y }) +#elif __GNUC_PREREQ__(4, 7) +#define CMPLX(x, y) __builtin_complex((double)(x), (double)(y)) +#define CMPLXF(x, y) __builtin_complex((float)(x), (float)(y)) +#define CMPLXL(x, y) __builtin_complex((long double)(x), (long double)(y)) +#endif +#endif /* __ISO_C_VISIBLE >= 2011 */ + __BEGIN_DECLS +#pragma GCC visibility push(default) double cabs(double complex); float cabsf(float complex); long double cabsl(long double complex); +double complex cacos(double complex); +float complex cacosf(float complex); +double complex cacosh(double complex); +float complex cacoshf(float complex); double carg(double complex); float cargf(float complex); long double cargl(long double complex); +double complex casin(double complex); +float complex casinf(float complex); +double complex casinh(double complex); +float complex casinhf(float complex); +double complex catan(double complex); +float complex catanf(float complex); +double complex catanh(double complex); +float complex catanhf(float complex); double complex ccos(double complex); float complex ccosf(float complex); double complex ccosh(double complex); @@ -87,6 +112,7 @@ float complex ctanf(float complex); double complex ctanh(double complex); float complex ctanhf(float complex); +#pragma GCC visibility pop __END_DECLS #endif /* _COMPLEX_H */ diff --git a/libm/upstream-freebsd/lib/msun/src/catrig.c b/libm/upstream-freebsd/lib/msun/src/catrig.c new file mode 100644 index 000000000..200977c0a --- /dev/null +++ b/libm/upstream-freebsd/lib/msun/src/catrig.c @@ -0,0 +1,639 @@ +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <complex.h>
+#include <float.h>
+
+#include "math.h"
+#include "math_private.h"
+
+#undef isinf
+#define isinf(x) (fabs(x) == INFINITY)
+#undef isnan
+#define isnan(x) ((x) != (x))
+#define raise_inexact() do { volatile float junk = 1 + tiny; } while(0)
+#undef signbit
+#define signbit(x) (__builtin_signbit(x))
+
+/* We need that DBL_EPSILON^2/128 is larger than FOUR_SQRT_MIN. */
+static const double
+A_crossover = 10, /* Hull et al suggest 1.5, but 10 works better */
+B_crossover = 0.6417, /* suggested by Hull et al */
+FOUR_SQRT_MIN = 0x1p-509, /* >= 4 * sqrt(DBL_MIN) */
+QUARTER_SQRT_MAX = 0x1p509, /* <= sqrt(DBL_MAX) / 4 */
+m_e = 2.7182818284590452e0, /* 0x15bf0a8b145769.0p-51 */
+m_ln2 = 6.9314718055994531e-1, /* 0x162e42fefa39ef.0p-53 */
+pio2_hi = 1.5707963267948966e0, /* 0x1921fb54442d18.0p-52 */
+RECIP_EPSILON = 1 / DBL_EPSILON,
+SQRT_3_EPSILON = 2.5809568279517849e-8, /* 0x1bb67ae8584caa.0p-78 */
+SQRT_6_EPSILON = 3.6500241499888571e-8, /* 0x13988e1409212e.0p-77 */
+SQRT_MIN = 0x1p-511; /* >= sqrt(DBL_MIN) */
+
+static const volatile double
+pio2_lo = 6.1232339957367659e-17; /* 0x11a62633145c07.0p-106 */
+static const volatile float
+tiny = 0x1p-100;
+
+static double complex clog_for_large_values(double complex z);
+
+/*
+ * Testing indicates that all these functions are accurate up to 4 ULP.
+ * The functions casin(h) and cacos(h) are about 2.5 times slower than asinh.
+ * The functions catan(h) are a little under 2 times slower than atanh.
+ *
+ * The code for casinh, casin, cacos, and cacosh comes first. The code is
+ * rather complicated, and the four functions are highly interdependent.
+ *
+ * The code for catanh and catan comes at the end. It is much simpler than
+ * the other functions, and the code for these can be disconnected from the
+ * rest of the code.
+ */
+
+/*
+ * ================================
+ * | casinh, casin, cacos, cacosh |
+ * ================================
+ */
+
+/*
+ * The algorithm is very close to that in "Implementing the complex arcsine
+ * and arccosine functions using exception handling" by T. E. Hull, Thomas F.
+ * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on
+ * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335,
+ * http://dl.acm.org/citation.cfm?id=275324.
+ *
+ * Throughout we use the convention z = x + I*y.
+ *
+ * casinh(z) = sign(x)*log(A+sqrt(A*A-1)) + I*asin(B)
+ * where
+ * A = (|z+I| + |z-I|) / 2
+ * B = (|z+I| - |z-I|) / 2 = y/A
+ *
+ * These formulas become numerically unstable:
+ *   (a) for Re(casinh(z)) when z is close to the line segment [-I, I] (that
+ *       is, Re(casinh(z)) is close to 0);
+ *   (b) for Im(casinh(z)) when z is close to either of the intervals
+ *       [I, I*infinity) or (-I*infinity, -I] (that is, |Im(casinh(z))| is
+ *       close to PI/2).
+ *
+ * These numerical problems are overcome by defining
+ * f(a, b) = (hypot(a, b) - b) / 2 = a*a / (hypot(a, b) + b) / 2
+ * Then if A < A_crossover, we use
+ *   log(A + sqrt(A*A-1)) = log1p((A-1) + sqrt((A-1)*(A+1)))
+ *   A-1 = f(x, 1+y) + f(x, 1-y)
+ * and if B > B_crossover, we use
+ *   asin(B) = atan2(y, sqrt(A*A - y*y)) = atan2(y, sqrt((A+y)*(A-y)))
+ *   A-y = f(x, y+1) + f(x, y-1)
+ * where without loss of generality we have assumed that x and y are
+ * non-negative.
+ *
+ * Much of the difficulty comes because the intermediate computations may
+ * produce overflows or underflows. This is dealt with in the paper by Hull
+ * et al by using exception handling.
We do this by detecting when + * computations risk underflow or overflow. The hardest part is handling the + * underflows when computing f(a, b). + * + * Note that the function f(a, b) does not appear explicitly in the paper by + * Hull et al, but the idea may be found on pages 308 and 309. Introducing the + * function f(a, b) allows us to concentrate many of the clever tricks in this + * paper into one function. + */ + +/* + * Function f(a, b, hypot_a_b) = (hypot(a, b) - b) / 2. + * Pass hypot(a, b) as the third argument. + */ +static inline double +f(double a, double b, double hypot_a_b) +{ + if (b < 0) + return ((hypot_a_b - b) / 2); + if (b == 0) + return (a / 2); + return (a * a / (hypot_a_b + b) / 2); +} + +/* + * All the hard work is contained in this function. + * x and y are assumed positive or zero, and less than RECIP_EPSILON. + * Upon return: + * rx = Re(casinh(z)) = -Im(cacos(y + I*x)). + * B_is_usable is set to 1 if the value of B is usable. + * If B_is_usable is set to 0, sqrt_A2my2 = sqrt(A*A - y*y), and new_y = y. + * If returning sqrt_A2my2 has potential to result in an underflow, it is + * rescaled, and new_y is similarly rescaled. + */ +static inline void +do_hard_work(double x, double y, double *rx, int *B_is_usable, double *B, + double *sqrt_A2my2, double *new_y) +{ + double R, S, A; /* A, B, R, and S are as in Hull et al. */ + double Am1, Amy; /* A-1, A-y. */ + + R = hypot(x, y + 1); /* |z+I| */ + S = hypot(x, y - 1); /* |z-I| */ + + /* A = (|z+I| + |z-I|) / 2 */ + A = (R + S) / 2; + /* + * Mathematically A >= 1. There is a small chance that this will not + * be so because of rounding errors. So we will make certain it is + * so. + */ + if (A < 1) + A = 1; + + if (A < A_crossover) { + /* + * Am1 = fp + fm, where fp = f(x, 1+y), and fm = f(x, 1-y). + * rx = log1p(Am1 + sqrt(Am1*(A+1))) + */ + if (y == 1 && x < DBL_EPSILON * DBL_EPSILON / 128) { + /* + * fp is of order x^2, and fm = x/2. + * A = 1 (inexactly). + */ + *rx = sqrt(x); + } else if (x >= DBL_EPSILON * fabs(y - 1)) { + /* + * Underflow will not occur because + * x >= DBL_EPSILON^2/128 >= FOUR_SQRT_MIN + */ + Am1 = f(x, 1 + y, R) + f(x, 1 - y, S); + *rx = log1p(Am1 + sqrt(Am1 * (A + 1))); + } else if (y < 1) { + /* + * fp = x*x/(1+y)/4, fm = x*x/(1-y)/4, and + * A = 1 (inexactly). + */ + *rx = x / sqrt((1 - y) * (1 + y)); + } else { /* if (y > 1) */ + /* + * A-1 = y-1 (inexactly). + */ + *rx = log1p((y - 1) + sqrt((y - 1) * (y + 1))); + } + } else { + *rx = log(A + sqrt(A * A - 1)); + } + + *new_y = y; + + if (y < FOUR_SQRT_MIN) { + /* + * Avoid a possible underflow caused by y/A. For casinh this + * would be legitimate, but will be picked up by invoking atan2 + * later on. For cacos this would not be legitimate. + */ + *B_is_usable = 0; + *sqrt_A2my2 = A * (2 / DBL_EPSILON); + *new_y = y * (2 / DBL_EPSILON); + return; + } + + /* B = (|z+I| - |z-I|) / 2 = y/A */ + *B = y / A; + *B_is_usable = 1; + + if (*B > B_crossover) { + *B_is_usable = 0; + /* + * Amy = fp + fm, where fp = f(x, y+1), and fm = f(x, y-1). + * sqrt_A2my2 = sqrt(Amy*(A+y)) + */ + if (y == 1 && x < DBL_EPSILON / 128) { + /* + * fp is of order x^2, and fm = x/2. + * A = 1 (inexactly). 
+ */ + *sqrt_A2my2 = sqrt(x) * sqrt((A + y) / 2); + } else if (x >= DBL_EPSILON * fabs(y - 1)) { + /* + * Underflow will not occur because + * x >= DBL_EPSILON/128 >= FOUR_SQRT_MIN + * and + * x >= DBL_EPSILON^2 >= FOUR_SQRT_MIN + */ + Amy = f(x, y + 1, R) + f(x, y - 1, S); + *sqrt_A2my2 = sqrt(Amy * (A + y)); + } else if (y > 1) { + /* + * fp = x*x/(y+1)/4, fm = x*x/(y-1)/4, and + * A = y (inexactly). + * + * y < RECIP_EPSILON. So the following + * scaling should avoid any underflow problems. + */ + *sqrt_A2my2 = x * (4 / DBL_EPSILON / DBL_EPSILON) * y / + sqrt((y + 1) * (y - 1)); + *new_y = y * (4 / DBL_EPSILON / DBL_EPSILON); + } else { /* if (y < 1) */ + /* + * fm = 1-y >= DBL_EPSILON, fp is of order x^2, and + * A = 1 (inexactly). + */ + *sqrt_A2my2 = sqrt((1 - y) * (1 + y)); + } + } +} + +/* + * casinh(z) = z + O(z^3) as z -> 0 + * + * casinh(z) = sign(x)*clog(sign(x)*z) + O(1/z^2) as z -> infinity + * The above formula works for the imaginary part as well, because + * Im(casinh(z)) = sign(x)*atan2(sign(x)*y, fabs(x)) + O(y/z^3) + * as z -> infinity, uniformly in y + */ +double complex +casinh(double complex z) +{ + double x, y, ax, ay, rx, ry, B, sqrt_A2my2, new_y; + int B_is_usable; + double complex w; + + x = creal(z); + y = cimag(z); + ax = fabs(x); + ay = fabs(y); + + if (isnan(x) || isnan(y)) { + /* casinh(+-Inf + I*NaN) = +-Inf + I*NaN */ + if (isinf(x)) + return (cpack(x, y + y)); + /* casinh(NaN + I*+-Inf) = opt(+-)Inf + I*NaN */ + if (isinf(y)) + return (cpack(y, x + x)); + /* casinh(NaN + I*0) = NaN + I*0 */ + if (y == 0) + return (cpack(x + x, y)); + /* + * All other cases involving NaN return NaN + I*NaN. + * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (cpack(x + 0.0L + (y + 0), x + 0.0L + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + /* clog...() will raise inexact unless x or y is infinite. */ + if (signbit(x) == 0) + w = clog_for_large_values(z) + m_ln2; + else + w = clog_for_large_values(-z) + m_ln2; + return (cpack(copysign(creal(w), x), copysign(cimag(w), y))); + } + + /* Avoid spuriously raising inexact for z = 0. */ + if (x == 0 && y == 0) + return (z); + + /* All remaining cases are inexact. */ + raise_inexact(); + + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) + return (z); + + do_hard_work(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y); + if (B_is_usable) + ry = asin(B); + else + ry = atan2(new_y, sqrt_A2my2); + return (cpack(copysign(rx, x), copysign(ry, y))); +} + +/* + * casin(z) = reverse(casinh(reverse(z))) + * where reverse(x + I*y) = y + I*x = I*conj(z). + */ +double complex +casin(double complex z) +{ + double complex w = casinh(cpack(cimag(z), creal(z))); + + return (cpack(cimag(w), creal(w))); +} + +/* + * cacos(z) = PI/2 - casin(z) + * but do the computation carefully so cacos(z) is accurate when z is + * close to 1. 
+ * + * cacos(z) = PI/2 - z + O(z^3) as z -> 0 + * + * cacos(z) = -sign(y)*I*clog(z) + O(1/z^2) as z -> infinity + * The above formula works for the real part as well, because + * Re(cacos(z)) = atan2(fabs(y), x) + O(y/z^3) + * as z -> infinity, uniformly in y + */ +double complex +cacos(double complex z) +{ + double x, y, ax, ay, rx, ry, B, sqrt_A2mx2, new_x; + int sx, sy; + int B_is_usable; + double complex w; + + x = creal(z); + y = cimag(z); + sx = signbit(x); + sy = signbit(y); + ax = fabs(x); + ay = fabs(y); + + if (isnan(x) || isnan(y)) { + /* cacos(+-Inf + I*NaN) = NaN + I*opt(-)Inf */ + if (isinf(x)) + return (cpack(y + y, -INFINITY)); + /* cacos(NaN + I*+-Inf) = NaN + I*-+Inf */ + if (isinf(y)) + return (cpack(x + x, -y)); + /* cacos(0 + I*NaN) = PI/2 + I*NaN with inexact */ + if (x == 0) + return (cpack(pio2_hi + pio2_lo, y + y)); + /* + * All other cases involving NaN return NaN + I*NaN. + * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (cpack(x + 0.0L + (y + 0), x + 0.0L + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + /* clog...() will raise inexact unless x or y is infinite. */ + w = clog_for_large_values(z); + rx = fabs(cimag(w)); + ry = creal(w) + m_ln2; + if (sy == 0) + ry = -ry; + return (cpack(rx, ry)); + } + + /* Avoid spuriously raising inexact for z = 1. */ + if (x == 1 && y == 0) + return (cpack(0, -y)); + + /* All remaining cases are inexact. */ + raise_inexact(); + + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) + return (cpack(pio2_hi - (x - pio2_lo), -y)); + + do_hard_work(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x); + if (B_is_usable) { + if (sx == 0) + rx = acos(B); + else + rx = acos(-B); + } else { + if (sx == 0) + rx = atan2(sqrt_A2mx2, new_x); + else + rx = atan2(sqrt_A2mx2, -new_x); + } + if (sy == 0) + ry = -ry; + return (cpack(rx, ry)); +} + +/* + * cacosh(z) = I*cacos(z) or -I*cacos(z) + * where the sign is chosen so Re(cacosh(z)) >= 0. + */ +double complex +cacosh(double complex z) +{ + double complex w; + double rx, ry; + + w = cacos(z); + rx = creal(w); + ry = cimag(w); + /* cacosh(NaN + I*NaN) = NaN + I*NaN */ + if (isnan(rx) && isnan(ry)) + return (cpack(ry, rx)); + /* cacosh(NaN + I*+-Inf) = +Inf + I*NaN */ + /* cacosh(+-Inf + I*NaN) = +Inf + I*NaN */ + if (isnan(rx)) + return (cpack(fabs(ry), rx)); + /* cacosh(0 + I*NaN) = NaN + I*NaN */ + if (isnan(ry)) + return (cpack(ry, ry)); + return (cpack(fabs(ry), copysign(rx, cimag(z)))); +} + +/* + * Optimized version of clog() for |z| finite and larger than ~RECIP_EPSILON. + */ +static double complex +clog_for_large_values(double complex z) +{ + double x, y; + double ax, ay, t; + + x = creal(z); + y = cimag(z); + ax = fabs(x); + ay = fabs(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + /* + * Avoid overflow in hypot() when x and y are both very large. + * Divide x and y by E, and then add 1 to the logarithm. This depends + * on E being larger than sqrt(2). + * Dividing by E causes an insignificant loss of accuracy; however + * this method is still poor since it is uneccessarily slow. + */ + if (ax > DBL_MAX / 2) + return (cpack(log(hypot(x / m_e, y / m_e)) + 1, atan2(y, x))); + + /* + * Avoid overflow when x or y is large. Avoid underflow when x or + * y is small. 
+ */ + if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) + return (cpack(log(hypot(x, y)), atan2(y, x))); + + return (cpack(log(ax * ax + ay * ay) / 2, atan2(y, x))); +} + +/* + * ================= + * | catanh, catan | + * ================= + */ + +/* + * sum_squares(x,y) = x*x + y*y (or just x*x if y*y would underflow). + * Assumes x*x and y*y will not overflow. + * Assumes x and y are finite. + * Assumes y is non-negative. + * Assumes fabs(x) >= DBL_EPSILON. + */ +static inline double +sum_squares(double x, double y) +{ + + /* Avoid underflow when y is small. */ + if (y < SQRT_MIN) + return (x * x); + + return (x * x + y * y); +} + +/* + * real_part_reciprocal(x, y) = Re(1/(x+I*y)) = x/(x*x + y*y). + * Assumes x and y are not NaN, and one of x and y is larger than + * RECIP_EPSILON. We avoid unwarranted underflow. It is important to not use + * the code creal(1/z), because the imaginary part may produce an unwanted + * underflow. + * This is only called in a context where inexact is always raised before + * the call, so no effort is made to avoid or force inexact. + */ +static inline double +real_part_reciprocal(double x, double y) +{ + double scale; + uint32_t hx, hy; + int32_t ix, iy; + + /* + * This code is inspired by the C99 document n1124.pdf, Section G.5.1, + * example 2. + */ + GET_HIGH_WORD(hx, x); + ix = hx & 0x7ff00000; + GET_HIGH_WORD(hy, y); + iy = hy & 0x7ff00000; +#define BIAS (DBL_MAX_EXP - 1) +/* XXX more guard digits are useful iff there is extra precision. */ +#define CUTOFF (DBL_MANT_DIG / 2 + 1) /* just half or 1 guard digit */ + if (ix - iy >= CUTOFF << 20 || isinf(x)) + return (1 / x); /* +-Inf -> +-0 is special */ + if (iy - ix >= CUTOFF << 20) + return (x / y / y); /* should avoid double div, but hard */ + if (ix <= (BIAS + DBL_MAX_EXP / 2 - CUTOFF) << 20) + return (x / (x * x + y * y)); + scale = 1; + SET_HIGH_WORD(scale, 0x7ff00000 - ix); /* 2**(1-ilogb(x)) */ + x *= scale; + y *= scale; + return (x / (x * x + y * y) * scale); +} + +/* + * catanh(z) = log((1+z)/(1-z)) / 2 + * = log1p(4*x / |z-1|^2) / 4 + * + I * atan2(2*y, (1-x)*(1+x)-y*y) / 2 + * + * catanh(z) = z + O(z^3) as z -> 0 + * + * catanh(z) = 1/z + sign(y)*I*PI/2 + O(1/z^3) as z -> infinity + * The above formula works for the real part as well, because + * Re(catanh(z)) = x/|z|^2 + O(x/z^4) + * as z -> infinity, uniformly in x + */ +double complex +catanh(double complex z) +{ + double x, y, ax, ay, rx, ry; + + x = creal(z); + y = cimag(z); + ax = fabs(x); + ay = fabs(y); + + /* This helps handle many cases. */ + if (y == 0 && ax <= 1) + return (cpack(atanh(x), y)); + + /* To ensure the same accuracy as atan(), and to filter out z = 0. */ + if (x == 0) + return (cpack(x, atan(y))); + + if (isnan(x) || isnan(y)) { + /* catanh(+-Inf + I*NaN) = +-0 + I*NaN */ + if (isinf(x)) + return (cpack(copysign(0, x), y + y)); + /* catanh(NaN + I*+-Inf) = sign(NaN)0 + I*+-PI/2 */ + if (isinf(y)) + return (cpack(copysign(0, x), + copysign(pio2_hi + pio2_lo, y))); + /* + * All other cases involving NaN return NaN + I*NaN. + * C99 leaves it optional whether to raise invalid if one of + * the arguments is not NaN, so we opt not to raise it. + */ + return (cpack(x + 0.0L + (y + 0), x + 0.0L + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) + return (cpack(real_part_reciprocal(x, y), + copysign(pio2_hi + pio2_lo, y))); + + if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) { + /* + * z = 0 was filtered out above. 
All other cases must raise + * inexact, but this is the only only that needs to do it + * explicitly. + */ + raise_inexact(); + return (z); + } + + if (ax == 1 && ay < DBL_EPSILON) + rx = (m_ln2 - log(ay)) / 2; + else + rx = log1p(4 * ax / sum_squares(ax - 1, ay)) / 4; + + if (ax == 1) + ry = atan2(2, -ay) / 2; + else if (ay < DBL_EPSILON) + ry = atan2(2 * ay, (1 - ax) * (1 + ax)) / 2; + else + ry = atan2(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2; + + return (cpack(copysign(rx, x), copysign(ry, y))); +} + +/* + * catan(z) = reverse(catanh(reverse(z))) + * where reverse(x + I*y) = y + I*x = I*conj(z). + */ +double complex +catan(double complex z) +{ + double complex w = catanh(cpack(cimag(z), creal(z))); + + return (cpack(cimag(w), creal(w))); +} diff --git a/libm/upstream-freebsd/lib/msun/src/catrigf.c b/libm/upstream-freebsd/lib/msun/src/catrigf.c new file mode 100644 index 000000000..08ebef783 --- /dev/null +++ b/libm/upstream-freebsd/lib/msun/src/catrigf.c @@ -0,0 +1,393 @@ +/*- + * Copyright (c) 2012 Stephen Montgomery-Smith + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * The algorithm is very close to that in "Implementing the complex arcsine + * and arccosine functions using exception handling" by T. E. Hull, Thomas F. + * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on + * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335, + * http://dl.acm.org/citation.cfm?id=275324. + * + * See catrig.c for complete comments. + * + * XXX comments were removed automatically, and even short ones on the right + * of statements were removed (all of them), contrary to normal style. Only + * a few comments on the right of declarations remain. 
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <complex.h>
+#include <float.h>
+
+#include "math.h"
+#include "math_private.h"
+
+#undef isinf
+#define isinf(x) (fabsf(x) == INFINITY)
+#undef isnan
+#define isnan(x) ((x) != (x))
+#define raise_inexact() do { volatile float junk = 1 + tiny; } while(0)
+#undef signbit
+#define signbit(x) (__builtin_signbitf(x))
+
+static const float
+A_crossover = 10,
+B_crossover = 0.6417,
+FOUR_SQRT_MIN = 0x1p-61,
+QUARTER_SQRT_MAX = 0x1p61,
+m_e = 2.7182818285e0, /* 0xadf854.0p-22 */
+m_ln2 = 6.9314718056e-1, /* 0xb17218.0p-24 */
+pio2_hi = 1.5707962513e0, /* 0xc90fda.0p-23 */
+RECIP_EPSILON = 1 / FLT_EPSILON,
+SQRT_3_EPSILON = 5.9801995673e-4, /* 0x9cc471.0p-34 */
+SQRT_6_EPSILON = 8.4572793338e-4, /* 0xddb3d7.0p-34 */
+SQRT_MIN = 0x1p-63;
+
+static const volatile float
+pio2_lo = 7.5497899549e-8, /* 0xa22169.0p-47 */
+tiny = 0x1p-100;
+
+static float complex clog_for_large_values(float complex z);
+
+static inline float
+f(float a, float b, float hypot_a_b)
+{
+	if (b < 0)
+		return ((hypot_a_b - b) / 2);
+	if (b == 0)
+		return (a / 2);
+	return (a * a / (hypot_a_b + b) / 2);
+}
+
+static inline void
+do_hard_work(float x, float y, float *rx, int *B_is_usable, float *B,
+    float *sqrt_A2my2, float *new_y)
+{
+	float R, S, A;
+	float Am1, Amy;
+
+	R = hypotf(x, y + 1);
+	S = hypotf(x, y - 1);
+
+	A = (R + S) / 2;
+	if (A < 1)
+		A = 1;
+
+	if (A < A_crossover) {
+		if (y == 1 && x < FLT_EPSILON * FLT_EPSILON / 128) {
+			*rx = sqrtf(x);
+		} else if (x >= FLT_EPSILON * fabsf(y - 1)) {
+			Am1 = f(x, 1 + y, R) + f(x, 1 - y, S);
+			*rx = log1pf(Am1 + sqrtf(Am1 * (A + 1)));
+		} else if (y < 1) {
+			*rx = x / sqrtf((1 - y) * (1 + y));
+		} else {
+			*rx = log1pf((y - 1) + sqrtf((y - 1) * (y + 1)));
+		}
+	} else {
+		*rx = logf(A + sqrtf(A * A - 1));
+	}
+
+	*new_y = y;
+
+	if (y < FOUR_SQRT_MIN) {
+		*B_is_usable = 0;
+		*sqrt_A2my2 = A * (2 / FLT_EPSILON);
+		*new_y = y * (2 / FLT_EPSILON);
+		return;
+	}
+
+	*B = y / A;
+	*B_is_usable = 1;
+
+	if (*B > B_crossover) {
+		*B_is_usable = 0;
+		if (y == 1 && x < FLT_EPSILON / 128) {
+			*sqrt_A2my2 = sqrtf(x) * sqrtf((A + y) / 2);
+		} else if (x >= FLT_EPSILON * fabsf(y - 1)) {
+			Amy = f(x, y + 1, R) + f(x, y - 1, S);
+			*sqrt_A2my2 = sqrtf(Amy * (A + y));
+		} else if (y > 1) {
+			*sqrt_A2my2 = x * (4 / FLT_EPSILON / FLT_EPSILON) * y /
+			    sqrtf((y + 1) * (y - 1));
+			*new_y = y * (4 / FLT_EPSILON / FLT_EPSILON);
+		} else {
+			*sqrt_A2my2 = sqrtf((1 - y) * (1 + y));
+		}
+	}
+}
+
+float complex
+casinhf(float complex z)
+{
+	float x, y, ax, ay, rx, ry, B, sqrt_A2my2, new_y;
+	int B_is_usable;
+	float complex w;
+
+	x = crealf(z);
+	y = cimagf(z);
+	ax = fabsf(x);
+	ay = fabsf(y);
+
+	if (isnan(x) || isnan(y)) {
+		if (isinf(x))
+			return (cpackf(x, y + y));
+		if (isinf(y))
+			return (cpackf(y, x + x));
+		if (y == 0)
+			return (cpackf(x + x, y));
+		return (cpackf(x + 0.0L + (y + 0), x + 0.0L + (y + 0)));
+	}
+
+	if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) {
+		if (signbit(x) == 0)
+			w = clog_for_large_values(z) + m_ln2;
+		else
+			w = clog_for_large_values(-z) + m_ln2;
+		return (cpackf(copysignf(crealf(w), x),
+		    copysignf(cimagf(w), y)));
+	}
+
+	if (x == 0 && y == 0)
+		return (z);
+
+	raise_inexact();
+
+	if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4)
+		return (z);
+
+	do_hard_work(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y);
+	if (B_is_usable)
+		ry = asinf(B);
+	else
+		ry = atan2f(new_y, sqrt_A2my2);
+	return (cpackf(copysignf(rx, x), copysignf(ry, y)));
+}
+
+float complex
+casinf(float complex z)
+{
+	float complex w =
casinhf(cpackf(cimagf(z), crealf(z))); + + return (cpackf(cimagf(w), crealf(w))); +} + +float complex +cacosf(float complex z) +{ + float x, y, ax, ay, rx, ry, B, sqrt_A2mx2, new_x; + int sx, sy; + int B_is_usable; + float complex w; + + x = crealf(z); + y = cimagf(z); + sx = signbit(x); + sy = signbit(y); + ax = fabsf(x); + ay = fabsf(y); + + if (isnan(x) || isnan(y)) { + if (isinf(x)) + return (cpackf(y + y, -INFINITY)); + if (isinf(y)) + return (cpackf(x + x, -y)); + if (x == 0) + return (cpackf(pio2_hi + pio2_lo, y + y)); + return (cpackf(x + 0.0L + (y + 0), x + 0.0L + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) { + w = clog_for_large_values(z); + rx = fabsf(cimagf(w)); + ry = crealf(w) + m_ln2; + if (sy == 0) + ry = -ry; + return (cpackf(rx, ry)); + } + + if (x == 1 && y == 0) + return (cpackf(0, -y)); + + raise_inexact(); + + if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) + return (cpackf(pio2_hi - (x - pio2_lo), -y)); + + do_hard_work(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x); + if (B_is_usable) { + if (sx == 0) + rx = acosf(B); + else + rx = acosf(-B); + } else { + if (sx == 0) + rx = atan2f(sqrt_A2mx2, new_x); + else + rx = atan2f(sqrt_A2mx2, -new_x); + } + if (sy == 0) + ry = -ry; + return (cpackf(rx, ry)); +} + +float complex +cacoshf(float complex z) +{ + float complex w; + float rx, ry; + + w = cacosf(z); + rx = crealf(w); + ry = cimagf(w); + if (isnan(rx) && isnan(ry)) + return (cpackf(ry, rx)); + if (isnan(rx)) + return (cpackf(fabsf(ry), rx)); + if (isnan(ry)) + return (cpackf(ry, ry)); + return (cpackf(fabsf(ry), copysignf(rx, cimagf(z)))); +} + +static float complex +clog_for_large_values(float complex z) +{ + float x, y; + float ax, ay, t; + + x = crealf(z); + y = cimagf(z); + ax = fabsf(x); + ay = fabsf(y); + if (ax < ay) { + t = ax; + ax = ay; + ay = t; + } + + if (ax > FLT_MAX / 2) + return (cpackf(logf(hypotf(x / m_e, y / m_e)) + 1, + atan2f(y, x))); + + if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) + return (cpackf(logf(hypotf(x, y)), atan2f(y, x))); + + return (cpackf(logf(ax * ax + ay * ay) / 2, atan2f(y, x))); +} + +static inline float +sum_squares(float x, float y) +{ + + if (y < SQRT_MIN) + return (x * x); + + return (x * x + y * y); +} + +static inline float +real_part_reciprocal(float x, float y) +{ + float scale; + uint32_t hx, hy; + int32_t ix, iy; + + GET_FLOAT_WORD(hx, x); + ix = hx & 0x7f800000; + GET_FLOAT_WORD(hy, y); + iy = hy & 0x7f800000; +#define BIAS (FLT_MAX_EXP - 1) +#define CUTOFF (FLT_MANT_DIG / 2 + 1) + if (ix - iy >= CUTOFF << 23 || isinf(x)) + return (1 / x); + if (iy - ix >= CUTOFF << 23) + return (x / y / y); + if (ix <= (BIAS + FLT_MAX_EXP / 2 - CUTOFF) << 23) + return (x / (x * x + y * y)); + SET_FLOAT_WORD(scale, 0x7f800000 - ix); + x *= scale; + y *= scale; + return (x / (x * x + y * y) * scale); +} + +float complex +catanhf(float complex z) +{ + float x, y, ax, ay, rx, ry; + + x = crealf(z); + y = cimagf(z); + ax = fabsf(x); + ay = fabsf(y); + + if (y == 0 && ax <= 1) + return (cpackf(atanhf(x), y)); + + if (x == 0) + return (cpackf(x, atanf(y))); + + if (isnan(x) || isnan(y)) { + if (isinf(x)) + return (cpackf(copysignf(0, x), y + y)); + if (isinf(y)) + return (cpackf(copysignf(0, x), + copysignf(pio2_hi + pio2_lo, y))); + return (cpackf(x + 0.0L + (y + 0), x + 0.0L + (y + 0))); + } + + if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) + return (cpackf(real_part_reciprocal(x, y), + copysignf(pio2_hi + pio2_lo, y))); + + if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) { + raise_inexact(); + 
	return (z);
+	}
+
+	if (ax == 1 && ay < FLT_EPSILON)
+		rx = (m_ln2 - logf(ay)) / 2;
+	else
+		rx = log1pf(4 * ax / sum_squares(ax - 1, ay)) / 4;
+
+	if (ax == 1)
+		ry = atan2f(2, -ay) / 2;
+	else if (ay < FLT_EPSILON)
+		ry = atan2f(2 * ay, (1 - ax) * (1 + ax)) / 2;
+	else
+		ry = atan2f(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2;
+
+	return (cpackf(copysignf(rx, x), copysignf(ry, y)));
+}
+
+float complex
+catanf(float complex z)
+{
+	float complex w = catanhf(cpackf(cimagf(z), crealf(z)));
+
+	return (cpackf(cimagf(w), crealf(w)));
+}
diff --git a/tests/Android.mk b/tests/Android.mk
index 1a7a5e063..5e0c593f7 100644
--- a/tests/Android.mk
+++ b/tests/Android.mk
@@ -54,6 +54,7 @@ test_cppflags = \
 libBionicStandardTests_src_files := \
     arpa_inet_test.cpp \
     buffer_tests.cpp \
+    complex_test.cpp \
     ctype_test.cpp \
     dirent_test.cpp \
     eventfd_test.cpp \
diff --git a/tests/complex_test.cpp b/tests/complex_test.cpp
new file mode 100644
index 000000000..47964f6b9
--- /dev/null
+++ b/tests/complex_test.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+// libc++ actively gets in the way of including <complex.h> from C++, so we
+// have to declare the complex math functions ourselves.
+// (libc++ also seems to have really bad implementations of its own that ignore
+// the intricacies of floating point math.)
+// http://llvm.org/bugs/show_bug.cgi?id=21504
+
+#include <math.h> // For M_PI.
+ +extern "C" double cabs(double _Complex); +TEST(complex, cabs) { + ASSERT_EQ(0.0, cabs(0)); +} + +extern "C" float cabsf(float _Complex); +TEST(complex, cabsf) { + ASSERT_EQ(0.0, cabsf(0)); +} + +extern "C" long double cabsl(long double _Complex); +TEST(complex, cabsl) { + ASSERT_EQ(0.0, cabsl(0)); +} + +extern "C" double _Complex cacos(double _Complex); +TEST(complex, cacos) { + ASSERT_EQ(M_PI/2.0, cacos(0.0)); +} + +extern "C" float _Complex cacosf(float _Complex); +TEST(complex, cacosf) { + ASSERT_EQ(static_cast(M_PI)/2.0f, cacosf(0.0)); +} + +extern "C" double _Complex cacosh(double _Complex); +TEST(complex, cacosh) { + ASSERT_EQ(0.0, cacosh(1.0)); +} + +extern "C" float _Complex cacoshf(float _Complex); +TEST(complex, cacoshf) { + ASSERT_EQ(0.0, cacoshf(1.0)); +} + +extern "C" double carg(double _Complex); +TEST(complex, carg) { + ASSERT_EQ(0.0, carg(0)); +} + +extern "C" float cargf(float _Complex); +TEST(complex, cargf) { + ASSERT_EQ(0.0, cargf(0)); +} + +extern "C" long double cargl(long double _Complex); +TEST(complex, cargl) { + ASSERT_EQ(0.0, cargl(0)); +} + +extern "C" double _Complex casin(double _Complex); +TEST(complex, casin) { + ASSERT_EQ(0.0, casin(0)); +} + +extern "C" float _Complex casinf(float _Complex); +TEST(complex, casinf) { + ASSERT_EQ(0.0, casinf(0)); +} + +extern "C" double _Complex casinh(double _Complex); +TEST(complex, casinh) { + ASSERT_EQ(0.0, casinh(0)); +} + +extern "C" float _Complex casinhf(float _Complex); +TEST(complex, casinhf) { + ASSERT_EQ(0.0, casinhf(0)); +} + +extern "C" double _Complex catan(double _Complex); +TEST(complex, catan) { + ASSERT_EQ(0.0, catan(0)); +} + +extern "C" float _Complex catanf(float _Complex); +TEST(complex, catanf) { + ASSERT_EQ(0.0, catanf(0)); +} + +extern "C" double _Complex catanh(double _Complex); +TEST(complex, catanh) { + ASSERT_EQ(0.0, catanh(0)); +} + +extern "C" float _Complex catanhf(float _Complex); +TEST(complex, catanhf) { + ASSERT_EQ(0.0, catanhf(0)); +} + +extern "C" double _Complex ccos(double _Complex); +TEST(complex, ccos) { + ASSERT_EQ(1.0, ccos(0)); +} + +extern "C" float _Complex ccosf(float _Complex); +TEST(complex, ccosf) { + ASSERT_EQ(1.0, ccosf(0)); +} + +extern "C" double _Complex ccosh(double _Complex); +TEST(complex, ccosh) { + ASSERT_EQ(1.0, ccosh(0)); +} + +extern "C" float _Complex ccoshf(float _Complex); +TEST(complex, ccoshf) { + ASSERT_EQ(1.0, ccoshf(0)); +} + +extern "C" double _Complex cexp(double _Complex); +TEST(complex, cexp) { + ASSERT_EQ(1.0, cexp(0)); +} + +extern "C" float _Complex cexpf(float _Complex); +TEST(complex, cexpf) { + ASSERT_EQ(1.0, cexpf(0)); +} + +extern "C" double cimag(double _Complex); +TEST(complex, cimag) { + ASSERT_EQ(0.0, cimag(0)); +} + +extern "C" float cimagf(float _Complex); +TEST(complex, cimagf) { + ASSERT_EQ(0.0f, cimagf(0)); +} + +extern "C" long double cimagl(long double _Complex); +TEST(complex, cimagl) { + ASSERT_EQ(0.0, cimagl(0)); +} + +extern "C" double _Complex conj(double _Complex); +TEST(complex, conj) { + ASSERT_EQ(0.0, conj(0)); +} + +extern "C" float _Complex conjf(float _Complex); +TEST(complex, conjf) { + ASSERT_EQ(0.0f, conjf(0)); +} + +extern "C" long double _Complex conjl(long double _Complex); +TEST(complex, conjl) { + ASSERT_EQ(0.0, conjl(0)); +} + +extern "C" double _Complex cproj(double _Complex); +TEST(complex, cproj) { + ASSERT_EQ(0.0, cproj(0)); +} + +extern "C" float _Complex cprojf(float _Complex); +TEST(complex, cprojf) { + ASSERT_EQ(0.0f, cprojf(0)); +} + +extern "C" long double _Complex cprojl(long double _Complex); 
+TEST(complex, cprojl) {
+  ASSERT_EQ(0.0, cprojl(0));
+}
+
+extern "C" double creal(double _Complex);
+TEST(complex, creal) {
+  ASSERT_EQ(0.0, creal(0));
+}
+
+extern "C" float crealf(float _Complex);
+TEST(complex, crealf) {
+  ASSERT_EQ(0.0f, crealf(0));
+}
+
+extern "C" long double creall(long double _Complex);
+TEST(complex, creall) {
+  ASSERT_EQ(0.0, creall(0));
+}
+
+extern "C" double _Complex csin(double _Complex);
+TEST(complex, csin) {
+  ASSERT_EQ(0.0, csin(0));
+}
+
+extern "C" float _Complex csinf(float _Complex);
+TEST(complex, csinf) {
+  ASSERT_EQ(0.0, csinf(0));
+}
+
+extern "C" double _Complex csinh(double _Complex);
+TEST(complex, csinh) {
+  ASSERT_EQ(0.0, csinh(0));
+}
+
+extern "C" float _Complex csinhf(float _Complex);
+TEST(complex, csinhf) {
+  ASSERT_EQ(0.0, csinhf(0));
+}
+
+extern "C" double _Complex csqrt(double _Complex);
+TEST(complex, csqrt) {
+  ASSERT_EQ(0.0, csqrt(0));
+}
+
+extern "C" float _Complex csqrtf(float _Complex);
+TEST(complex, csqrtf) {
+  ASSERT_EQ(0.0f, csqrtf(0));
+}
+
+extern "C" long double _Complex csqrtl(long double _Complex);
+TEST(complex, csqrtl) {
+  ASSERT_EQ(0.0, csqrtl(0));
+}
+
+extern "C" double _Complex ctan(double _Complex);
+TEST(complex, ctan) {
+  ASSERT_EQ(0.0, ctan(0));
+}
+
+extern "C" float _Complex ctanf(float _Complex);
+TEST(complex, ctanf) {
+  ASSERT_EQ(0.0, ctanf(0));
+}
+
+extern "C" double _Complex ctanh(double _Complex);
+TEST(complex, ctanh) {
+  ASSERT_EQ(0.0, ctanh(0));
+}
+
+extern "C" float _Complex ctanhf(float _Complex);
+TEST(complex, ctanhf) {
+  ASSERT_EQ(0.0, ctanhf(0));
+}
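
For illustration only, and not part of the patch above: a minimal C11 program that exercises the declarations added to <complex.h> in this change (the new CMPLX macro and a few of the complex arc-trig functions). The function names come straight from the header hunk; the expected values in the comments mirror the smoke tests in complex_test.cpp. This is a sketch assuming a C11 toolchain built against the patched libm.

/* build: cc -std=c11 example.c -lm */
#include <complex.h>
#include <stdio.h>

int main(void) {
  double complex z = CMPLX(0.0, 0.0);   /* 0 + 0i via the new C11 macro */
  double complex a = cacos(z);          /* expect pi/2 + 0i, as in the cacos test */
  double complex s = casinh(z);         /* expect 0 + 0i */
  double complex t = catan(z);          /* expect 0 + 0i */
  printf("cacos(0)  = %g%+gi\n", creal(a), cimag(a));
  printf("casinh(0) = %g%+gi\n", creal(s), cimag(s));
  printf("catan(0)  = %g%+gi\n", creal(t), cimag(t));
  return 0;
}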