diff --git a/lib/libtcc1.c b/lib/libtcc1.c
index 284965e..87f4e84 100644
--- a/lib/libtcc1.c
+++ b/lib/libtcc1.c
@@ -54,29 +54,29 @@ typedef long double XFtype;
 #define HIGH_WORD_COEFF (((UDWtype) 1) << WORD_SIZE)

 /* the following deal with IEEE single-precision numbers */
-#define EXCESS          126
-#define SIGNBIT         0x80000000
-#define HIDDEN          (1 << 23)
-#define SIGN(fp)        ((fp) & SIGNBIT)
-#define EXP(fp)         (((fp) >> 23) & 0xFF)
-#define MANT(fp)        (((fp) & 0x7FFFFF) | HIDDEN)
-#define PACK(s,e,m)     ((s) | ((e) << 23) | (m))
+#define EXCESS          126
+#define SIGNBIT         0x80000000
+#define HIDDEN          (1 << 23)
+#define SIGN(fp)        ((fp) & SIGNBIT)
+#define EXP(fp)         (((fp) >> 23) & 0xFF)
+#define MANT(fp)        (((fp) & 0x7FFFFF) | HIDDEN)
+#define PACK(s,e,m)     ((s) | ((e) << 23) | (m))

 /* the following deal with IEEE double-precision numbers */
-#define EXCESSD         1022
-#define HIDDEND         (1 << 20)
-#define EXPD(fp)        (((fp.l.upper) >> 20) & 0x7FF)
-#define SIGND(fp)       ((fp.l.upper) & SIGNBIT)
-#define MANTD(fp)       (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
-                         (fp.l.lower >> 22))
-#define HIDDEND_LL      ((long long)1 << 52)
-#define MANTD_LL(fp)    ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
-#define PACKD_LL(s,e,m) (((long long)((s)+((e)<<20))<<32)|(m))
+#define EXCESSD         1022
+#define HIDDEND         (1 << 20)
+#define EXPD(fp)        (((fp.l.upper) >> 20) & 0x7FF)
+#define SIGND(fp)       ((fp.l.upper) & SIGNBIT)
+#define MANTD(fp)       (((((fp.l.upper) & 0xFFFFF) | HIDDEND) << 10) | \
+                         (fp.l.lower >> 22))
+#define HIDDEND_LL      ((long long)1 << 52)
+#define MANTD_LL(fp)    ((fp.ll & (HIDDEND_LL-1)) | HIDDEND_LL)
+#define PACKD_LL(s,e,m) (((long long)((s)+((e)<<20))<<32)|(m))

 /* the following deal with x86 long double-precision numbers */
-#define EXCESSLD        16382
-#define EXPLD(fp)       (fp.l.upper & 0x7fff)
-#define SIGNLD(fp)      ((fp.l.upper) & 0x8000)
+#define EXCESSLD        16382
+#define EXPLD(fp)       (fp.l.upper & 0x7fff)
+#define SIGNLD(fp)      ((fp.l.upper) & 0x8000)

 /* only for x86 */
 union ldouble_long {
@@ -114,32 +114,32 @@ union float_long {
 /* XXX: use gcc/tcc intrinsic ? */
 #if defined(TCC_TARGET_I386)
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
-           : "=r" ((USItype) (sh)), \
-             "=&r" ((USItype) (sl)) \
-           : "0" ((USItype) (ah)), \
-             "g" ((USItype) (bh)), \
-             "1" ((USItype) (al)), \
-             "g" ((USItype) (bl)))
+  __asm__ ("subl %5,%1\n\tsbbl %3,%0" \
+           : "=r" ((USItype) (sh)), \
+             "=&r" ((USItype) (sl)) \
+           : "0" ((USItype) (ah)), \
+             "g" ((USItype) (bh)), \
+             "1" ((USItype) (al)), \
+             "g" ((USItype) (bl)))
 #define umul_ppmm(w1, w0, u, v) \
-  __asm__ ("mull %3" \
-           : "=a" ((USItype) (w0)), \
-             "=d" ((USItype) (w1)) \
-           : "%0" ((USItype) (u)), \
-             "rm" ((USItype) (v)))
+  __asm__ ("mull %3" \
+           : "=a" ((USItype) (w0)), \
+             "=d" ((USItype) (w1)) \
+           : "%0" ((USItype) (u)), \
+             "rm" ((USItype) (v)))
 #define udiv_qrnnd(q, r, n1, n0, dv) \
-  __asm__ ("divl %4" \
-           : "=a" ((USItype) (q)), \
-             "=d" ((USItype) (r)) \
-           : "0" ((USItype) (n0)), \
-             "1" ((USItype) (n1)), \
-             "rm" ((USItype) (dv)))
+  __asm__ ("divl %4" \
+           : "=a" ((USItype) (q)), \
+             "=d" ((USItype) (r)) \
+           : "0" ((USItype) (n0)), \
+             "1" ((USItype) (n1)), \
+             "rm" ((USItype) (dv)))
 #define count_leading_zeros(count, x) \
-  do { \
-    USItype __cbtmp; \
-    __asm__ ("bsrl %1,%0" \
-             : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
-    (count) = __cbtmp ^ 31; \
+  do { \
+    USItype __cbtmp; \
+    __asm__ ("bsrl %1,%0" \
+             : "=r" (__cbtmp) : "rm" ((USItype) (x))); \
+    (count) = __cbtmp ^ 31; \
   } while (0)
 #else
 #error unsupported CPU type
@@ -168,33 +168,33 @@ static UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
   if (d1 == 0)
     {
       if (d0 > n1)
-        {
-          /* 0q = nn / 0D */
+        {
+          /* 0q = nn / 0D */

-          udiv_qrnnd (q0, n0, n1, n0, d0);
-          q1 = 0;
+          udiv_qrnnd (q0, n0, n1, n0, d0);
+          q1 = 0;

-          /* Remainder in n0. */
-        }
+          /* Remainder in n0. */
+        }
       else
-        {
-          /* qq = NN / 0d */
+        {
+          /* qq = NN / 0d */

-          if (d0 == 0)
-            d0 = 1 / d0;        /* Divide intentionally by zero. */
+          if (d0 == 0)
+            d0 = 1 / d0;        /* Divide intentionally by zero. */

-          udiv_qrnnd (q1, n1, 0, n1, d0);
-          udiv_qrnnd (q0, n0, n1, n0, d0);
+          udiv_qrnnd (q1, n1, 0, n1, d0);
+          udiv_qrnnd (q0, n0, n1, n0, d0);

-          /* Remainder in n0. */
-        }
+          /* Remainder in n0. */
+        }

       if (rp != 0)
-        {
-          rr.s.low = n0;
-          rr.s.high = 0;
-          *rp = rr.ll;
-        }
+        {
+          rr.s.low = n0;
+          rr.s.high = 0;
+          *rp = rr.ll;
+        }
     }

 #else /* UDIV_NEEDS_NORMALIZATION */
@@ -202,160 +202,160 @@ static UDWtype __udivmoddi4 (UDWtype n, UDWtype d, UDWtype *rp)
   if (d1 == 0)
     {
       if (d0 > n1)
-        {
-          /* 0q = nn / 0D */
+        {
+          /* 0q = nn / 0D */

-          count_leading_zeros (bm, d0);
+          count_leading_zeros (bm, d0);

-          if (bm != 0)
-            {
-              /* Normalize, i.e. make the most significant bit of the
-                 denominator set. */
+          if (bm != 0)
+            {
+              /* Normalize, i.e. make the most significant bit of the
+                 denominator set. */

-              d0 = d0 << bm;
-              n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
-              n0 = n0 << bm;
-            }
+              d0 = d0 << bm;
+              n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+              n0 = n0 << bm;
+            }

-          udiv_qrnnd (q0, n0, n1, n0, d0);
-          q1 = 0;
+          udiv_qrnnd (q0, n0, n1, n0, d0);
+          q1 = 0;

-          /* Remainder in n0 >> bm. */
-        }
+          /* Remainder in n0 >> bm. */
+        }
       else
-        {
-          /* qq = NN / 0d */
+        {
+          /* qq = NN / 0d */

-          if (d0 == 0)
-            d0 = 1 / d0;        /* Divide intentionally by zero. */
+          if (d0 == 0)
+            d0 = 1 / d0;        /* Divide intentionally by zero. */

-          count_leading_zeros (bm, d0);
+          count_leading_zeros (bm, d0);

-          if (bm == 0)
-            {
-              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
-                 conclude (the most significant bit of n1 is set) /\ (the
-                 leading quotient digit q1 = 1).
+          if (bm == 0)
+            {
+              /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+                 conclude (the most significant bit of n1 is set) /\ (the
+                 leading quotient digit q1 = 1).

-                 This special case is necessary, not an optimization.
-                 (Shifts counts of W_TYPE_SIZE are undefined.) */
+                 This special case is necessary, not an optimization.
+                 (Shifts counts of W_TYPE_SIZE are undefined.) */

-              n1 -= d0;
-              q1 = 1;
+              n1 -= d0;
+              q1 = 1;

-            }
-          else
-            {
-              /* Normalize. */
+            }
+          else
+            {
+              /* Normalize. */

-              b = W_TYPE_SIZE - bm;
+              b = W_TYPE_SIZE - bm;

-              d0 = d0 << bm;
-              n2 = n1 >> b;
-              n1 = (n1 << bm) | (n0 >> b);
-              n0 = n0 << bm;
+              d0 = d0 << bm;
+              n2 = n1 >> b;
+              n1 = (n1 << bm) | (n0 >> b);
+              n0 = n0 << bm;

-              udiv_qrnnd (q1, n1, n2, n1, d0);
-            }
+              udiv_qrnnd (q1, n1, n2, n1, d0);
+            }

-          /* n1 != d0... */
+          /* n1 != d0... */

-          udiv_qrnnd (q0, n0, n1, n0, d0);
+          udiv_qrnnd (q0, n0, n1, n0, d0);

-          /* Remainder in n0 >> bm. */
-        }
+          /* Remainder in n0 >> bm. */
+        }

       if (rp != 0)
-        {
-          rr.s.low = n0 >> bm;
-          rr.s.high = 0;
-          *rp = rr.ll;
-        }
+        {
+          rr.s.low = n0 >> bm;
+          rr.s.high = 0;
+          *rp = rr.ll;
+        }
     }
 #endif /* UDIV_NEEDS_NORMALIZATION */
   else
     {
       if (d1 > n1)
-        {
-          /* 00 = nn / DD */
+        {
+          /* 00 = nn / DD */

-          q0 = 0;
-          q1 = 0;
+          q0 = 0;
+          q1 = 0;

-          /* Remainder in n1n0. */
-          if (rp != 0)
-            {
-              rr.s.low = n0;
-              rr.s.high = n1;
-              *rp = rr.ll;
-            }
-        }
+          /* Remainder in n1n0. */
+          if (rp != 0)
+            {
+              rr.s.low = n0;
+              rr.s.high = n1;
+              *rp = rr.ll;
+            }
+        }
       else
-        {
-          /* 0q = NN / dd */
+        {
+          /* 0q = NN / dd */

-          count_leading_zeros (bm, d1);
-          if (bm == 0)
-            {
-              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
-                 conclude (the most significant bit of n1 is set) /\ (the
-                 quotient digit q0 = 0 or 1).
+          count_leading_zeros (bm, d1);
+          if (bm == 0)
+            {
+              /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+                 conclude (the most significant bit of n1 is set) /\ (the
+                 quotient digit q0 = 0 or 1).

-                 This special case is necessary, not an optimization. */
+                 This special case is necessary, not an optimization. */

-              /* The condition on the next line takes advantage of that
-                 n1 >= d1 (true due to program flow). */
-              if (n1 > d1 || n0 >= d0)
-                {
-                  q0 = 1;
-                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
-                }
-              else
-                q0 = 0;
+              /* The condition on the next line takes advantage of that
+                 n1 >= d1 (true due to program flow). */
+              if (n1 > d1 || n0 >= d0)
+                {
+                  q0 = 1;
+                  sub_ddmmss (n1, n0, n1, n0, d1, d0);
+                }
+              else
+                q0 = 0;

-              q1 = 0;
+              q1 = 0;

-              if (rp != 0)
-                {
-                  rr.s.low = n0;
-                  rr.s.high = n1;
-                  *rp = rr.ll;
-                }
-            }
-          else
-            {
-              UWtype m1, m0;
-              /* Normalize. */
+              if (rp != 0)
+                {
+                  rr.s.low = n0;
+                  rr.s.high = n1;
+                  *rp = rr.ll;
+                }
+            }
+          else
+            {
+              UWtype m1, m0;
+              /* Normalize. */

-              b = W_TYPE_SIZE - bm;
+              b = W_TYPE_SIZE - bm;

-              d1 = (d1 << bm) | (d0 >> b);
-              d0 = d0 << bm;
-              n2 = n1 >> b;
-              n1 = (n1 << bm) | (n0 >> b);
-              n0 = n0 << bm;
+              d1 = (d1 << bm) | (d0 >> b);
+              d0 = d0 << bm;
+              n2 = n1 >> b;
+              n1 = (n1 << bm) | (n0 >> b);
+              n0 = n0 << bm;

-              udiv_qrnnd (q0, n1, n2, n1, d1);
-              umul_ppmm (m1, m0, q0, d0);
+              udiv_qrnnd (q0, n1, n2, n1, d1);
+              umul_ppmm (m1, m0, q0, d0);

-              if (m1 > n1 || (m1 == n1 && m0 > n0))
-                {
-                  q0--;
-                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
-                }
+              if (m1 > n1 || (m1 == n1 && m0 > n0))
+                {
+                  q0--;
+                  sub_ddmmss (m1, m0, m1, m0, d1, d0);
+                }

-              q1 = 0;
+              q1 = 0;

-              /* Remainder in (n1n0 - m1m0) >> bm. */
-              if (rp != 0)
-                {
-                  sub_ddmmss (n1, n0, n1, n0, m1, m0);
-                  rr.s.low = (n1 << b) | (n0 >> bm);
-                  rr.s.high = n1 >> bm;
-                  *rp = rr.ll;
-                }
-            }
-        }
+              /* Remainder in (n1n0 - m1m0) >> bm. */
+              if (rp != 0)
+                {
+                  sub_ddmmss (n1, n0, n1, n0, m1, m0);
+                  rr.s.low = (n1 << b) | (n0 >> bm);
+                  rr.s.high = n1 >> bm;
+                  *rp = rr.ll;
+                }
+            }
+        }
     }

   ww.s.low = q0;
@@ -537,13 +537,13 @@ unsigned long long __fixunssfdi (float a1)
     fl1.f = a1;

     if (fl1.l == 0)
-        return (0);
+        return (0);

     exp = EXP (fl1.l) - EXCESS - 24;
     l = MANT(fl1.l);

     if (exp >= 41)
-        return (unsigned long long)-1;
+        return (unsigned long long)-1;
     else if (exp >= 0)
         return (unsigned long long)l << exp;
     else if (exp >= -23)
@@ -561,14 +561,14 @@ unsigned long long __fixunsdfdi (double a1)

     dl1.d = a1;

     if (dl1.ll == 0)
-        return (0);
+        return (0);

     exp = EXPD (dl1) - EXCESSD - 53;
     l = MANTD_LL(dl1);

     if (exp >= 12)
-        return (unsigned long long)-1;
+        return (unsigned long long)-1;
     else if (exp >= 0)
         return l << exp;
     else if (exp >= -52)
@@ -586,14 +586,14 @@ unsigned long long __fixunsxfdi (long double a1)

     dl1.ld = a1;

     if (dl1.l.lower == 0 && dl1.l.upper == 0)
-        return (0);
+        return (0);

     exp = EXPLD (dl1) - EXCESSLD - 64;
     l = dl1.l.lower;

     if (exp > 0)
-        return (unsigned long long)-1;
+        return (unsigned long long)-1;
     else if (exp >= -63)
         return l >> -exp;
     else
diff --git a/tests/tcctest.c b/tests/tcctest.c
index 54465a6..66a1cac 100644
--- a/tests/tcctest.c
+++ b/tests/tcctest.c
@@ -155,8 +155,8 @@ static int onetwothree = 123;
 #define B3 4
 #endif

-#define __INT64_C(c)    c ## LL
-#define INT64_MIN       (-__INT64_C(9223372036854775807)-1)
+#define __INT64_C(c)    c ## LL
+#define INT64_MIN       (-__INT64_C(9223372036854775807)-1)

 int qq(int x)
 {
@@ -1444,30 +1444,30 @@ struct complexinit {

 const static struct complexinit cix[] = {
     [0] = {
-        .a = 2000,
-        .b = (const struct complexinit0[]) {
-            { 2001, 2002 },
-            { 2003, 2003 },
-            {}
-        }
+        .a = 2000,
+        .b = (const struct complexinit0[]) {
+            { 2001, 2002 },
+            { 2003, 2003 },
+            {}
+        }
     }
 };

 struct complexinit2 {
-    int a;
-    int b[];
+    int a;
+    int b[];
 };

 struct complexinit2 cix20;

 struct complexinit2 cix21 = {
-    .a = 3000,
-    .b = { 3001, 3002, 3003 }
+    .a = 3000,
+    .b = { 3001, 3002, 3003 }
 };

 struct complexinit2 cix22 = {
-    .a = 4000,
-    .b = { 4001, 4002, 4003, 4004, 4005, 4006 }
+    .a = 4000,
+    .b = { 4001, 4002, 4003, 4004, 4005, 4006 }
 };

 void init_test(void)
@@ -1564,10 +1564,10 @@ void init_test(void)
     printf("\n");
     /* complex init check */
     printf("cix: %d %d %d %d %d %d %d\n",
-           cix[0].a,
-           cix[0].b[0].a, cix[0].b[0].b,
-           cix[0].b[1].a, cix[0].b[1].b,
-           cix[0].b[2].a, cix[0].b[2].b);
+           cix[0].a,
+           cix[0].b[0].a, cix[0].b[0].b,
+           cix[0].b[1].a, cix[0].b[1].b,
+           cix[0].b[2].a, cix[0].b[2].b);
     printf("cix2: %d %d\n", cix21.b[2], cix22.b[5]);
     printf("sizeof cix20 %d, cix21 %d, sizeof cix22 %d\n", sizeof cix20, sizeof cix21, sizeof cix22);
 }
@@ -2454,21 +2454,21 @@ static char * strncat1(char * dest,const char * src,size_t count)
 {
 int d0, d1, d2, d3;
 __asm__ __volatile__(
-        "repne\n\t"
-        "scasb\n\t"
-        "decl %1\n\t"
-        "movl %8,%3\n"
-        "1:\tdecl %3\n\t"
-        "js 2f\n\t"
-        "lodsb\n\t"
-        "stosb\n\t"
-        "testb %%al,%%al\n\t"
-        "jne 1b\n"
-        "2:\txorl %2,%2\n\t"
-        "stosb"
-        : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
-        : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
-        : "memory");
+        "repne\n\t"
+        "scasb\n\t"
+        "decl %1\n\t"
+        "movl %8,%3\n"
+        "1:\tdecl %3\n\t"
+        "js 2f\n\t"
+        "lodsb\n\t"
+        "stosb\n\t"
+        "testb %%al,%%al\n\t"
+        "jne 1b\n"
+        "2:\txorl %2,%2\n\t"
+        "stosb"
+        : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+        : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+        : "memory");
 return dest;
 }

@@ -2476,20 +2476,20 @@ static char * strncat2(char * dest,const char * src,size_t count)
 {
 int d0, d1, d2, d3;
 __asm__ __volatile__(
-        "repne scasb\n\t" /* one-line repne prefix + string op */
-        "decl %1\n\t"
-        "movl %8,%3\n"
-        "1:\tdecl %3\n\t"
-        "js 2f\n\t"
-        "lodsb\n\t"
-        "stosb\n\t"
-        "testb %%al,%%al\n\t"
-        "jne 1b\n"
-        "2:\txorl %2,%2\n\t"
-        "stosb"
-        : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
-        : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
-        : "memory");
+        "repne scasb\n\t" /* one-line repne prefix + string op */
+        "decl %1\n\t"
+        "movl %8,%3\n"
+        "1:\tdecl %3\n\t"
+        "js 2f\n\t"
+        "lodsb\n\t"
+        "stosb\n\t"
+        "testb %%al,%%al\n\t"
+        "jne 1b\n"
+        "2:\txorl %2,%2\n\t"
+        "stosb"
+        : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
+        : "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
+        : "memory");
 return dest;
 }

@@ -2497,17 +2497,17 @@ static inline void * memcpy1(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
-        "rep ; movsl\n\t"
-        "testb $2,%b4\n\t"
-        "je 1f\n\t"
-        "movsw\n"
-        "1:\ttestb $1,%b4\n\t"
-        "je 2f\n\t"
-        "movsb\n"
-        "2:"
-        : "=&c" (d0), "=&D" (d1), "=&S" (d2)
-        :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
-        : "memory");
+        "rep ; movsl\n\t"
+        "testb $2,%b4\n\t"
+        "je 1f\n\t"
+        "movsw\n"
+        "1:\ttestb $1,%b4\n\t"
+        "je 2f\n\t"
+        "movsb\n"
+        "2:"
+        : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+        :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+        : "memory");
 return (to);
 }

@@ -2515,38 +2515,38 @@ static inline void * memcpy2(void * to, const void * from, size_t n)
 {
 int d0, d1, d2;
 __asm__ __volatile__(
-        "rep movsl\n\t" /* one-line rep prefix + string op */
-        "testb $2,%b4\n\t"
-        "je 1f\n\t"
-        "movsw\n"
-        "1:\ttestb $1,%b4\n\t"
-        "je 2f\n\t"
-        "movsb\n"
-        "2:"
-        : "=&c" (d0), "=&D" (d1), "=&S" (d2)
-        :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
-        : "memory");
+        "rep movsl\n\t" /* one-line rep prefix + string op */
+        "testb $2,%b4\n\t"
+        "je 1f\n\t"
+        "movsw\n"
+        "1:\ttestb $1,%b4\n\t"
+        "je 2f\n\t"
+        "movsb\n"
+        "2:"
+        : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+        :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+        : "memory");
 return (to);
 }

 static __inline__ void sigaddset1(unsigned int *set, int _sig)
 {
-        __asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+        __asm__("btsl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }

 static __inline__ void sigdelset1(unsigned int *set, int _sig)
 {
-        asm("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
+        asm("btrl %1,%0" : "=m"(*set) : "Ir"(_sig - 1) : "cc");
 }

 static __inline__ __const__ unsigned int swab32(unsigned int x)
 {
-        __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
-                "rorl $16,%0\n\t"   /* swap words */
-                "xchgb %b0,%h0"     /* swap higher bytes */
-                :"=q" (x)
-                : "0" (x));
-        return x;
+        __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
+                "rorl $16,%0\n\t"   /* swap words */
+                "xchgb %b0,%h0"     /* swap higher bytes */
+                :"=q" (x)
+                : "0" (x));
+        return x;
 }

 static __inline__ unsigned long long mul64(unsigned int a, unsigned int b)
@@ -2675,23 +2675,23 @@ int weak_toolate() { return 0; }

 void __attribute__((weak)) weak_test(void)
 {
-    printf("weak_f1=%d\n", weak_f1 ? weak_f1() : 123);
-    printf("weak_f2=%d\n", weak_f2 ? weak_f2() : 123);
-    printf("weak_f3=%d\n", weak_f3 ? weak_f3() : 123);
-    printf("weak_v1=%d\n",&weak_v1 ? weak_v1 : 123);
-    printf("weak_v2=%d\n",&weak_v2 ? weak_v2 : 123);
-    printf("weak_v3=%d\n",&weak_v3 ? weak_v3 : 123);
+    printf("weak_f1=%d\n", weak_f1 ? weak_f1() : 123);
+    printf("weak_f2=%d\n", weak_f2 ? weak_f2() : 123);
+    printf("weak_f3=%d\n", weak_f3 ? weak_f3() : 123);
+    printf("weak_v1=%d\n",&weak_v1 ? weak_v1 : 123);
+    printf("weak_v2=%d\n",&weak_v2 ? weak_v2 : 123);
+    printf("weak_v3=%d\n",&weak_v3 ? weak_v3 : 123);

-    printf("weak_fpa=%d\n",&weak_fpa ? weak_fpa() : 123);
-    printf("weak_fpb=%d\n",&weak_fpb ? weak_fpb() : 123);
-    printf("weak_fpc=%d\n",&weak_fpc ? weak_fpc() : 123);
-
-    printf("weak_asm_f1=%d\n", weak_asm_f1 != NULL);
-    printf("weak_asm_f2=%d\n", weak_asm_f2 != NULL);
-    printf("weak_asm_f3=%d\n", weak_asm_f3 != NULL);
-    printf("weak_asm_v1=%d\n",&weak_asm_v1 != NULL);
-    printf("weak_asm_v2=%d\n",&weak_asm_v2 != NULL);
-    printf("weak_asm_v3=%d\n",&weak_asm_v3 != NULL);
+    printf("weak_fpa=%d\n",&weak_fpa ? weak_fpa() : 123);
+    printf("weak_fpb=%d\n",&weak_fpb ? weak_fpb() : 123);
+    printf("weak_fpc=%d\n",&weak_fpc ? weak_fpc() : 123);
+
+    printf("weak_asm_f1=%d\n", weak_asm_f1 != NULL);
+    printf("weak_asm_f2=%d\n", weak_asm_f2 != NULL);
+    printf("weak_asm_f3=%d\n", weak_asm_f3 != NULL);
+    printf("weak_asm_v1=%d\n",&weak_asm_v1 != NULL);
+    printf("weak_asm_v2=%d\n",&weak_asm_v2 != NULL);
+    printf("weak_asm_v3=%d\n",&weak_asm_v3 != NULL);
 }

 int __attribute__((weak)) weak_f2() { return 222; }
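
Illustrative note on the first lib/libtcc1.c hunk: the EXCESS/HIDDEN/EXP/MANT/PACK macros only have their spacing touched, but they are the basis for every soft-float helper below. A minimal standalone sketch of what they encode, assuming a 32-bit IEEE-754 float; the test value, variable names and main() wrapper are illustrative and not taken from the patch:

#include <stdio.h>

/* Same bit-layout assumptions as lib/libtcc1.c: 32-bit IEEE-754 float. */
#define EXCESS      126
#define SIGNBIT     0x80000000
#define HIDDEN      (1 << 23)
#define SIGN(fp)    ((fp) & SIGNBIT)
#define EXP(fp)     (((fp) >> 23) & 0xFF)
#define MANT(fp)    (((fp) & 0x7FFFFF) | HIDDEN)
#define PACK(s,e,m) ((s) | ((e) << 23) | (m))

union float_long { float f; unsigned int l; }; /* mirrors libtcc1.c */

int main(void)
{
    union float_long fl, re;
    unsigned int s, e, m;

    fl.f = -6.5f;
    s = SIGN(fl.l);   /* sign bit, already in position 31 */
    e = EXP(fl.l);    /* biased exponent */
    m = MANT(fl.l);   /* 24-bit mantissa with the hidden bit restored */

    printf("sign=%u biased_exp=%u unbiased_exp=%d mant=0x%x\n",
           s ? 1 : 0, e, (int)e - EXCESS - 1, m);

    /* Dropping the hidden bit again and repacking recreates the original bits. */
    re.l = PACK(s, e, m & ~HIDDEN);
    printf("repacked=%f bits_match=%d\n", re.f, re.l == fl.l);
    return 0;
}

Compiled with any C compiler, this prints the decomposed sign/exponent/mantissa of -6.5 and confirms that PACK() reassembles the original bit pattern.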
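
Illustrative note on the __fixunssfdi/__fixunsdfdi/__fixunsxfdi hunks: only leading whitespace changes, but the shared exponent arithmetic is easy to misread. Below is a hedged plain-C model of the single-precision case for positive, in-range inputs; the function name fixunssfdi_model and the sample values are made up for illustration, and the overflow (exp >= 41) and exp < -23 branches that the real helper handles are deliberately omitted:

#include <stdio.h>

#define EXCESS   126
#define HIDDEN   (1 << 23)
#define EXP(fp)  (((fp) >> 23) & 0xFF)
#define MANT(fp) (((fp) & 0x7FFFFF) | HIDDEN)

union float_long { float f; unsigned int l; };

/* Treat MANT() as a 24-bit integer and shift it by (biased exp - EXCESS - 24),
   which is exactly the "exp" computed in the hunk above. */
static unsigned long long fixunssfdi_model(float a)
{
    union float_long fl;
    unsigned long long l;
    int exp;

    fl.f = a;
    if (fl.l == 0)
        return 0;
    exp = EXP(fl.l) - EXCESS - 24;
    l = MANT(fl.l);
    if (exp >= 0)
        return l << exp;      /* value has no fractional bits left */
    return l >> -exp;         /* fractional bits are shifted out (truncation) */
}

int main(void)
{
    printf("%llu %llu\n",
           fixunssfdi_model(123456.0f),
           fixunssfdi_model(1048576.5f));
    return 0;
}

Running it prints 123456 and 1048576, showing the truncation toward zero that the library routine implements for the common case.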
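
Illustrative note on the weak_test() hunk in tests/tcctest.c: the guard pattern "sym ? sym() : 123" relies on an undefined weak symbol resolving to a null address. A minimal sketch of that pattern, assuming a GCC-compatible ELF toolchain where this holds; defined_weak and missing_weak are hypothetical names, not symbols from the test suite:

#include <stdio.h>

/* Both symbols are weak; only one of them is ever defined.  On an ELF
   toolchain the undefined one resolves to a null address, so the guard
   below skips the call -- the same pattern weak_test() exercises. */
int defined_weak(void) __attribute__((weak));
int missing_weak(void) __attribute__((weak));   /* intentionally never defined */

int defined_weak(void) { return 111; }

int main(void)
{
    printf("defined_weak=%d\n", defined_weak ? defined_weak() : 123);
    printf("missing_weak=%d\n", missing_weak ? missing_weak() : 123);
    return 0;
}

With both functions weak, the first call resolves to the local definition and prints 111, while the unresolved one falls back to 123, which is the behaviour weak_test() checks for weak_f1 through weak_v3.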