// Bind fcntl_old to a specific pre-fcntl64 versioned glibc symbol, chosen
// per target architecture, so the wrappers below can call the legacy
// (non-large-file) fcntl entry point.
// NOTE(review): extraction artifact — original source line numbers ("21",
// "22", ...) are fused into the text, and the closing #endif of this
// preprocessor chain is not visible here.
21 #if defined(HAVE_CONFIG_H) && !defined(HAVE_LARGE_FILE_SUPPORT)
// Variadic declaration matching fcntl(2); resolved against a versioned
// symbol via the .symver directives below.
22 extern "C" int fcntl_old(
int fd,
int cmd, ...);
// GLIBC_2.0 — oldest glibc baseline (i386-era); TODO confirm this branch's
// intended target, since the guard tests config macros rather than an arch.
24 __asm(
".symver fcntl_old,fcntl@GLIBC_2.0");
// x86-64: glibc symbol baseline is 2.2.5.
25 #elif defined(__amd64__)
26 __asm(
".symver fcntl_old,fcntl@GLIBC_2.2.5");
// 32-bit ARM: glibc symbol baseline is 2.4.
27 #elif defined(__arm__)
28 __asm(
".symver fcntl_old,fcntl@GLIBC_2.4");
// AArch64: glibc symbol baseline is 2.17.
29 #elif defined(__aarch64__)
30 __asm(
".symver fcntl_old,fcntl@GLIBC_2.17");
// __wrap_fcntl: linker --wrap interposer for fcntl; forwards to the
// versioned fcntl_old declared above.
// NOTE(review): body is only partially visible in this extraction — the
// declarations of `ret` and `vargs`, the va_start/va_end calls, and the
// return statement are missing here.
33 extern "C" int __wrap_fcntl(
int fd,
int cmd, ...) {
// Forwards the single variadic argument as a pointer-sized value.
// NOTE(review): reading the vararg as void* assumes pointer-sized
// arguments for every fcntl cmd, including int-taking ones — TODO confirm
// this is safe on all supported ABIs.
37 ret = fcntl_old(fd, cmd, va_arg(vargs,
void *));
// __wrap_fcntl64: linker --wrap interposer for fcntl64, routed to the same
// legacy fcntl_old symbol as __wrap_fcntl.
// NOTE(review): body is only partially visible in this extraction — the
// declarations of `ret` and `vargs`, the va_start/va_end calls, and the
// return statement are missing here.
41 extern "C" int __wrap_fcntl64(
int fd,
int cmd, ...) {
// Same pointer-sized vararg forwarding as __wrap_fcntl; same ABI caveat
// applies (TODO confirm for int-taking fcntl cmds).
45 ret = fcntl_old(fd, cmd, va_arg(vargs,
void *));
// Bind exp_old to the architecture's original glibc exp symbol version
// (predating the optimized implementations added in GLIBC_2.29).
// NOTE(review): the opening #if branch — and, presumably, the declaration
// of exp_old — is not visible in this extraction; only the first directive
// and the #elif chain survive.
57 __asm(
".symver exp_old,exp@GLIBC_2.1");
// x86-64 baseline.
58 #elif defined(__amd64__)
59 __asm(
".symver exp_old,exp@GLIBC_2.2.5");
// 32-bit ARM baseline.
60 #elif defined(__arm__)
61 __asm(
".symver exp_old,exp@GLIBC_2.4");
// AArch64 baseline.
62 #elif defined(__aarch64__)
63 __asm(
".symver exp_old,exp@GLIBC_2.17");
// Bind log_old to the architecture's original glibc log symbol version
// (pre-GLIBC_2.29), mirroring the exp_old chain above.
// NOTE(review): opening #if branch and the log_old declaration are not
// visible in this extraction.
71 __asm(
".symver log_old,log@GLIBC_2.1");
// x86-64 baseline.
72 #elif defined(__amd64__)
73 __asm(
".symver log_old,log@GLIBC_2.2.5");
// 32-bit ARM baseline.
74 #elif defined(__arm__)
75 __asm(
".symver log_old,log@GLIBC_2.4");
// AArch64 baseline.
76 #elif defined(__aarch64__)
77 __asm(
".symver log_old,log@GLIBC_2.17");
// Bind log2_old to the architecture's original glibc log2 symbol version
// (pre-GLIBC_2.29), same pattern as exp_old/log_old.
// NOTE(review): opening #if branch and the log2_old declaration are not
// visible in this extraction.
85 __asm(
".symver log2_old,log2@GLIBC_2.1");
// x86-64 baseline.
86 #elif defined(__amd64__)
87 __asm(
".symver log2_old,log2@GLIBC_2.2.5");
// 32-bit ARM baseline.
88 #elif defined(__arm__)
89 __asm(
".symver log2_old,log2@GLIBC_2.4");
// AArch64 baseline.
90 #elif defined(__aarch64__)
91 __asm(
".symver log2_old,log2@GLIBC_2.17");
// Bind pow_old to the architecture's original glibc pow symbol version
// (pre-GLIBC_2.29), same pattern as the other *_old chains.
// NOTE(review): opening #if branch and the pow_old declaration are not
// visible in this extraction.
99 __asm(
".symver pow_old,pow@GLIBC_2.1");
// x86-64 baseline.
100 #elif defined(__amd64__)
101 __asm(
".symver pow_old,pow@GLIBC_2.2.5");
// 32-bit ARM baseline.
102 #elif defined(__arm__)
103 __asm(
".symver pow_old,pow@GLIBC_2.4");
// AArch64 baseline.
104 #elif defined(__aarch64__)
105 __asm(
".symver pow_old,pow@GLIBC_2.17");
// Wrapper / old-symbol prototypes (fragments — semicolons or bodies are
// missing from this extraction).
// NOTE(review): these use float, but glibc's exp/log/log2/pow take and
// return double, and pow takes TWO arguments — confirm against the
// original source whether these were double signatures (or expf/logf
// variants) before the extraction garbled them.
float __wrap_exp(float x)
float __wrap_log2(float x)
float __wrap_log(float x)
float __wrap_pow(float x)
float exp_old(float x)
Starting with GLIBC_2.29, glibc ships optimized implementations of these math functions; the .symver bindings above pin the older, pre-2.29 versions instead.