patches/glibc/2.2.2/glibc-2.2.2-allow-gcc3-longlong.patch
author "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
Sat Feb 24 11:00:05 2007 +0000 (2007-02-24)
changeset 1 eeea35fbf182
permissions -rw-r--r--
Add the full crosstool-NG sources to its own new repository.
You might just say: "Yeah! crosstool-NG's got its own repo!"
Unfortunately, that's because the previous repo got damaged beyond repair and I had no backup.
That means I'm putting backups in place in the afternoon.
That also means we've lost history... :-(
     1 Fixes the following errors seen with GCC 3:
     2 ./longlong.h:423: error: parse error before '%' token
     3 ./longlong.h:423: error: missing terminating " character
     4 ./longlong.h:432: error: missing terminating " character
     5 See also patches/glibc-2.1.3/glibc-2.1.3-allow-gcc3-longlong.patch
     6 
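The cause is the same in every hunk below: the old macros embed literal
newlines inside a single asm() string, which GCC 3 rejects.  Each hunk
splits the template into one quoted string per instruction, ends each
piece with \n, and lets C string-literal concatenation glue the pieces
back together, so the generated assembly is unchanged.  A rough,
stand-alone sketch of the transformation (modeled on the i386 add_ssaaaa
macro behind the line-423 errors quoted above; the add_carry name and the
test values are made up for the illustration, and it only builds on x86):

    #include <stdio.h>

    /* Old style -- the raw newline ends the source line but not the
       string, so GCC 3 stops with "missing terminating \" character":
         __asm__ ("addl %5,%1
              adcl %3,%0" : ...)
       New style -- adjacent literals concatenate, \n separates insns: */
    #define add_carry(sh, sl, ah, al, bh, bl)   \
      __asm__ ("addl %5,%1\n"                   \
               "adcl %3,%0"                     \
               : "=r" (sh), "=&r" (sl)          \
               : "0" (ah), "g" (bh), "1" (al), "g" (bl))

    int main (void)
    {
      unsigned int sh, sl;
      add_carry (sh, sl, 1u, 0xffffffffu, 0u, 1u);  /* 0x1ffffffff + 1 */
      printf ("%08x%08x\n", sh, sl);                /* 0000000200000000 */
      return 0;
    }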
     7 ===================================================================
     8 --- glibc-2.2.2/stdlib/longlong.h.old	2000-02-11 15:48:58.000000000 -0800
     9 +++ glibc-2.2.2/stdlib/longlong.h	2005-04-11 15:36:10.000000000 -0700
    10 @@ -108,8 +108,8 @@
    11  
    12  #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
    13  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    14 -  __asm__ ("add %1,%4,%5
    15 -	addc %0,%2,%3"							\
    16 +  __asm__ ("add %1,%4,%5\n"           \
    17 +	"addc %0,%2,%3"							\
    18  	   : "=r" ((USItype) (sh)),					\
    19  	    "=&r" ((USItype) (sl))					\
    20  	   : "%r" ((USItype) (ah)),					\
    21 @@ -117,8 +117,8 @@
    22  	     "%r" ((USItype) (al)),					\
    23  	     "rI" ((USItype) (bl)))
    24  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    25 -  __asm__ ("sub %1,%4,%5
    26 -	subc %0,%2,%3"							\
    27 +  __asm__ ("sub %1,%4,%5\n"           \
    28 +	"subc %0,%2,%3"							\
    29  	   : "=r" ((USItype) (sh)),					\
    30  	     "=&r" ((USItype) (sl))					\
    31  	   : "r" ((USItype) (ah)),					\
    32 @@ -175,8 +175,8 @@
    33  
    34  #if defined (__arc__) && W_TYPE_SIZE == 32
    35  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    36 -  __asm__ ("add.f	%1, %4, %5
    37 -	adc	%0, %2, %3"						\
    38 +  __asm__ ("add.f	%1, %4, %5\n"       \
    39 +	"adc	%0, %2, %3"						\
    40  	   : "=r" ((USItype) (sh)),					\
    41  	     "=&r" ((USItype) (sl))					\
    42  	   : "%r" ((USItype) (ah)),					\
    43 @@ -184,8 +184,8 @@
    44  	     "%r" ((USItype) (al)),					\
    45  	     "rIJ" ((USItype) (bl)))
    46  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    47 -  __asm__ ("sub.f	%1, %4, %5
    48 -	sbc	%0, %2, %3"						\
    49 +  __asm__ ("sub.f	%1, %4, %5\n"       \
    50 +	"sbc	%0, %2, %3"						\
    51  	   : "=r" ((USItype) (sh)),					\
    52  	     "=&r" ((USItype) (sl))					\
    53  	   : "r" ((USItype) (ah)),					\
    54 @@ -206,8 +206,8 @@
    55  
    56  #if defined (__arm__) && W_TYPE_SIZE == 32
    57  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    58 -  __asm__ ("adds	%1, %4, %5
    59 -	adc	%0, %2, %3"						\
    60 +  __asm__ ("adds	%1, %4, %5\n"       \
    61 +	"adc	%0, %2, %3"						\
    62  	   : "=r" ((USItype) (sh)),					\
    63  	     "=&r" ((USItype) (sl))					\
    64  	   : "%r" ((USItype) (ah)),					\
    65 @@ -215,8 +215,8 @@
    66  	     "%r" ((USItype) (al)),					\
    67  	     "rI" ((USItype) (bl)))
    68  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    69 -  __asm__ ("subs	%1, %4, %5
    70 -	sbc	%0, %2, %3"						\
    71 +  __asm__ ("subs	%1, %4, %5\n"       \
    72 +	"sbc	%0, %2, %3"						\
    73  	   : "=r" ((USItype) (sh)),					\
    74  	     "=&r" ((USItype) (sl))					\
    75  	   : "r" ((USItype) (ah)),					\
    76 @@ -225,19 +225,19 @@
    77  	     "rI" ((USItype) (bl)))
    78  #define umul_ppmm(xh, xl, a, b) \
    79  {register USItype __t0, __t1, __t2;					\
    80 -  __asm__ ("%@ Inlined umul_ppmm
    81 -	mov	%2, %5, lsr #16
    82 -	mov	%0, %6, lsr #16
    83 -	bic	%3, %5, %2, lsl #16
    84 -	bic	%4, %6, %0, lsl #16
    85 -	mul	%1, %3, %4
    86 -	mul	%4, %2, %4
    87 -	mul	%3, %0, %3
    88 -	mul	%0, %2, %0
    89 -	adds	%3, %4, %3
    90 -	addcs	%0, %0, #65536
    91 -	adds	%1, %1, %3, lsl #16
    92 -	adc	%0, %0, %3, lsr #16"					\
    93 +  __asm__ ("%@ Inlined umul_ppmm\n"   \
    94 +	"mov	%2, %5, lsr #16\n"            \
    95 +	"mov	%0, %6, lsr #16\n"            \
    96 +	"bic	%3, %5, %2, lsl #16\n"        \
    97 +	"bic	%4, %6, %0, lsl #16\n"        \
    98 +	"mul	%1, %3, %4\n"                 \
    99 +	"mul	%4, %2, %4\n"                 \
   100 +	"mul	%3, %0, %3\n"                 \
   101 +	"mul	%0, %2, %0\n"                 \
   102 +	"adds	%3, %4, %3\n"                 \
   103 +	"addcs	%0, %0, #65536\n"           \
   104 +	"adds	%1, %1, %3, lsl #16\n"        \
   105 +	"adc	%0, %0, %3, lsr #16"					\
   106  	   : "=&r" ((USItype) (xh)),					\
   107  	     "=r" ((USItype) (xl)),					\
   108  	     "=&r" (__t0), "=&r" (__t1), "=r" (__t2)			\
   109 @@ -277,8 +277,8 @@
   110  
   111  #if defined (__gmicro__) && W_TYPE_SIZE == 32
   112  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   113 -  __asm__ ("add.w %5,%1
   114 -	addx %3,%0"							\
   115 +  __asm__ ("add.w %5,%1\n"            \
   116 +	"addx %3,%0"							\
   117  	   : "=g" ((USItype) (sh)),					\
   118  	     "=&g" ((USItype) (sl))					\
   119  	   : "%0" ((USItype) (ah)),					\
   120 @@ -286,8 +286,8 @@
   121  	     "%1" ((USItype) (al)),					\
   122  	     "g" ((USItype) (bl)))
   123  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   124 -  __asm__ ("sub.w %5,%1
   125 -	subx %3,%0"							\
   126 +  __asm__ ("sub.w %5,%1\n"            \
   127 +	"subx %3,%0"							\
   128  	   : "=g" ((USItype) (sh)),					\
   129  	     "=&g" ((USItype) (sl))					\
   130  	   : "0" ((USItype) (ah)),					\
   131 @@ -316,8 +316,8 @@
   132  
   133  #if defined (__hppa) && W_TYPE_SIZE == 32
   134  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   135 -  __asm__ ("add %4,%5,%1
   136 -	addc %2,%3,%0"							\
   137 +  __asm__ ("add %4,%5,%1\n"           \
   138 +	"addc %2,%3,%0"							\
   139  	   : "=r" ((USItype) (sh)),					\
   140  	     "=&r" ((USItype) (sl))					\
   141  	   : "%rM" ((USItype) (ah)),					\
   142 @@ -325,8 +325,8 @@
   143  	     "%rM" ((USItype) (al)),					\
   144  	     "rM" ((USItype) (bl)))
   145  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   146 -  __asm__ ("sub %4,%5,%1
   147 -	subb %2,%3,%0"							\
   148 +  __asm__ ("sub %4,%5,%1\n"           \
   149 +	"subb %2,%3,%0"							\
   150  	   : "=r" ((USItype) (sh)),					\
   151  	     "=&r" ((USItype) (sl))					\
   152  	   : "rM" ((USItype) (ah)),					\
   153 @@ -357,22 +357,22 @@
   154    do {									\
   155      USItype __tmp;							\
   156      __asm__ (								\
   157 -       "ldi		1,%0
   158 -	extru,=		%1,15,16,%%r0		; Bits 31..16 zero?
   159 -	extru,tr	%1,15,16,%1		; No.  Shift down, skip add.
   160 -	ldo		16(%0),%0		; Yes.  Perform add.
   161 -	extru,=		%1,23,8,%%r0		; Bits 15..8 zero?
   162 -	extru,tr	%1,23,8,%1		; No.  Shift down, skip add.
   163 -	ldo		8(%0),%0		; Yes.  Perform add.
   164 -	extru,=		%1,27,4,%%r0		; Bits 7..4 zero?
   165 -	extru,tr	%1,27,4,%1		; No.  Shift down, skip add.
   166 -	ldo		4(%0),%0		; Yes.  Perform add.
   167 -	extru,=		%1,29,2,%%r0		; Bits 3..2 zero?
   168 -	extru,tr	%1,29,2,%1		; No.  Shift down, skip add.
   169 -	ldo		2(%0),%0		; Yes.  Perform add.
   170 -	extru		%1,30,1,%1		; Extract bit 1.
   171 -	sub		%0,%1,%0		; Subtract it.
   172 -	" : "=r" (count), "=r" (__tmp) : "1" (x));			\
   173 +       "ldi		1,%0\n"                                        \
   174 +	"extru,=		%1,15,16,%%r0		; Bits 31..16 zero?\n"         \
   175 +	"extru,tr	%1,15,16,%1		; No.  Shift down, skip add.\n"    \
   176 +	"ldo		16(%0),%0		; Yes.  Perform add.\n"                \
   177 +	"extru,=		%1,23,8,%%r0		; Bits 15..8 zero?\n"          \
   178 +	"extru,tr	%1,23,8,%1		; No.  Shift down, skip add.\n"    \
   179 +	"ldo		8(%0),%0		; Yes.  Perform add.\n"                \
   180 +	"extru,=		%1,27,4,%%r0		; Bits 7..4 zero?\n"           \
   181 +	"extru,tr	%1,27,4,%1		; No.  Shift down, skip add.\n"    \
   182 +	"ldo		4(%0),%0		; Yes.  Perform add.\n"                \
   183 +	"extru,=		%1,29,2,%%r0		; Bits 3..2 zero?\n"           \
   184 +	"extru,tr	%1,29,2,%1		; No.  Shift down, skip add.\n"    \
   185 +	"ldo		2(%0),%0		; Yes.  Perform add.\n"                \
   186 +	"extru		%1,30,1,%1		; Extract bit 1.\n"                \
   187 +	"sub		%0,%1,%0		; Subtract it.\n"                      \
   188 +	: "=r" (count), "=r" (__tmp) : "1" (x));			\
   189    } while (0)
   190  #endif
   191  
   192 @@ -419,8 +419,8 @@
   193  
   194  #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
   195  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   196 -  __asm__ ("addl %5,%1
   197 -	adcl %3,%0"							\
   198 +  __asm__ ("addl %5,%1\n"             \
   199 +	"adcl %3,%0"							\
   200  	   : "=r" ((USItype) (sh)),					\
   201  	     "=&r" ((USItype) (sl))					\
   202  	   : "%0" ((USItype) (ah)),					\
   203 @@ -428,8 +428,8 @@
   204  	     "%1" ((USItype) (al)),					\
   205  	     "g" ((USItype) (bl)))
   206  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   207 -  __asm__ ("subl %5,%1
   208 -	sbbl %3,%0"							\
   209 +  __asm__ ("subl %5,%1\n"             \
   210 +	"sbbl %3,%0"							\
   211  	   : "=r" ((USItype) (sh)),					\
   212  	     "=&r" ((USItype) (sl))					\
   213  	   : "0" ((USItype) (ah)),					\
   214 @@ -525,9 +525,9 @@
   215  #if defined (__M32R__) && W_TYPE_SIZE == 32
   216  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   217    /* The cmp clears the condition bit.  */ \
   218 -  __asm__ ("cmp %0,%0
   219 -	addx %%5,%1
   220 -	addx %%3,%0"							\
   221 +  __asm__ ("cmp %0,%0\n"              \
   222 +	"addx %%5,%1\n"                     \
   223 +	"addx %%3,%0"							\
   224  	   : "=r" ((USItype) (sh)),					\
   225  	     "=&r" ((USItype) (sl))					\
   226  	   : "%0" ((USItype) (ah)),					\
   227 @@ -537,9 +537,9 @@
   228  	   : "cbit")
   229  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   230    /* The cmp clears the condition bit.  */ \
   231 -  __asm__ ("cmp %0,%0
   232 -	subx %5,%1
   233 -	subx %3,%0"							\
   234 +  __asm__ ("cmp %0,%0\n"              \
   235 +	"subx %5,%1\n"                      \
   236 +	"subx %3,%0"							\
   237  	   : "=r" ((USItype) (sh)),					\
   238  	     "=&r" ((USItype) (sl))					\
   239  	   : "0" ((USItype) (ah)),					\
   240 @@ -551,8 +551,8 @@
   241  
   242  #if defined (__mc68000__) && W_TYPE_SIZE == 32
   243  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   244 -  __asm__ ("add%.l %5,%1
   245 -	addx%.l %3,%0"							\
   246 +  __asm__ ("add%.l %5,%1\n"           \
   247 +	"addx%.l %3,%0"							\
   248  	   : "=d" ((USItype) (sh)),					\
   249  	     "=&d" ((USItype) (sl))					\
   250  	   : "%0" ((USItype) (ah)),					\
   251 @@ -560,8 +560,8 @@
   252  	     "%1" ((USItype) (al)),					\
   253  	     "g" ((USItype) (bl)))
   254  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   255 -  __asm__ ("sub%.l %5,%1
   256 -	subx%.l %3,%0"							\
   257 +  __asm__ ("sub%.l %5,%1\n"           \
   258 +	"subx%.l %3,%0"							\
   259  	   : "=d" ((USItype) (sh)),					\
   260  	     "=&d" ((USItype) (sl))					\
   261  	   : "0" ((USItype) (ah)),					\
   262 @@ -602,32 +602,32 @@
   263  #if !defined(__mcf5200__)
   264  /* %/ inserts REGISTER_PREFIX, %# inserts IMMEDIATE_PREFIX.  */
   265  #define umul_ppmm(xh, xl, a, b) \
   266 -  __asm__ ("| Inlined umul_ppmm
   267 -	move%.l	%2,%/d0
   268 -	move%.l	%3,%/d1
   269 -	move%.l	%/d0,%/d2
   270 -	swap	%/d0
   271 -	move%.l	%/d1,%/d3
   272 -	swap	%/d1
   273 -	move%.w	%/d2,%/d4
   274 -	mulu	%/d3,%/d4
   275 -	mulu	%/d1,%/d2
   276 -	mulu	%/d0,%/d3
   277 -	mulu	%/d0,%/d1
   278 -	move%.l	%/d4,%/d0
   279 -	eor%.w	%/d0,%/d0
   280 -	swap	%/d0
   281 -	add%.l	%/d0,%/d2
   282 -	add%.l	%/d3,%/d2
   283 -	jcc	1f
   284 -	add%.l	%#65536,%/d1
   285 -1:	swap	%/d2
   286 -	moveq	%#0,%/d0
   287 -	move%.w	%/d2,%/d0
   288 -	move%.w	%/d4,%/d2
   289 -	move%.l	%/d2,%1
   290 -	add%.l	%/d1,%/d0
   291 -	move%.l	%/d0,%0"						\
   292 +  __asm__ ("| Inlined umul_ppmm\n"    \
   293 +	"move%.l	%2,%/d0\n"                \
   294 +	"move%.l	%3,%/d1\n"                \
   295 +	"move%.l	%/d0,%/d2\n"              \
   296 +	"swap	%/d0\n"                       \
   297 +	"move%.l	%/d1,%/d3\n"              \
   298 +	"swap	%/d1\n"                       \
   299 +	"move%.w	%/d2,%/d4\n"              \
   300 +	"mulu	%/d3,%/d4\n"                  \
   301 +	"mulu	%/d1,%/d2\n"                  \
   302 +	"mulu	%/d0,%/d3\n"                  \
   303 +	"mulu	%/d0,%/d1\n"                  \
   304 +	"move%.l	%/d4,%/d0\n"              \
   305 +	"eor%.w	%/d0,%/d0\n"                \
   306 +	"swap	%/d0\n"                       \
   307 +	"add%.l	%/d0,%/d2\n"                \
   308 +	"add%.l	%/d3,%/d2\n"                \
   309 +	"jcc	1f\n"                         \
   310 +	"add%.l	%#65536,%/d1\n"             \
   311 +"1:	swap	%/d2\n"                     \
   312 +	"moveq	%#0,%/d0\n"                 \
   313 +	"move%.w	%/d2,%/d0\n"              \
   314 +	"move%.w	%/d4,%/d2\n"              \
   315 +	"move%.l	%/d2,%1\n"                \
   316 +	"add%.l	%/d1,%/d0\n"                \
   317 +	"move%.l	%/d0,%0"						\
   318  	   : "=g" ((USItype) (xh)),					\
   319  	     "=g" ((USItype) (xl))					\
   320  	   : "g" ((USItype) (a)),					\
   321 @@ -653,8 +653,8 @@
   322  
   323  #if defined (__m88000__) && W_TYPE_SIZE == 32
   324  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   325 -  __asm__ ("addu.co %1,%r4,%r5
   326 -	addu.ci %0,%r2,%r3"						\
   327 +  __asm__ ("addu.co %1,%r4,%r5\n"     \
   328 +	"addu.ci %0,%r2,%r3"						\
   329  	   : "=r" ((USItype) (sh)),					\
   330  	     "=&r" ((USItype) (sl))					\
   331  	   : "%rJ" ((USItype) (ah)),					\
   332 @@ -662,8 +662,8 @@
   333  	     "%rJ" ((USItype) (al)),					\
   334  	     "rJ" ((USItype) (bl)))
   335  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   336 -  __asm__ ("subu.co %1,%r4,%r5
   337 -	subu.ci %0,%r2,%r3"						\
   338 +  __asm__ ("subu.co %1,%r4,%r5\n"     \
   339 +	"subu.ci %0,%r2,%r3"						\
   340  	   : "=r" ((USItype) (sh)),					\
   341  	     "=&r" ((USItype) (sl))					\
   342  	   : "rJ" ((USItype) (ah)),					\
   343 @@ -880,8 +880,8 @@
   344  
   345  #if defined (__pyr__) && W_TYPE_SIZE == 32
   346  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   347 -  __asm__ ("addw	%5,%1
   348 -	addwc	%3,%0"							\
   349 +  __asm__ ("addw	%5,%1\n"            \
   350 +	"addwc	%3,%0"							\
   351  	   : "=r" ((USItype) (sh)),					\
   352  	     "=&r" ((USItype) (sl))					\
   353  	   : "%0" ((USItype) (ah)),					\
   354 @@ -889,8 +889,8 @@
   355  	     "%1" ((USItype) (al)),					\
   356  	     "g" ((USItype) (bl)))
   357  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   358 -  __asm__ ("subw	%5,%1
   359 -	subwb	%3,%0"							\
   360 +  __asm__ ("subw	%5,%1\n"            \
   361 +	"subwb	%3,%0"							\
   362  	   : "=r" ((USItype) (sh)),					\
   363  	     "=&r" ((USItype) (sl))					\
   364  	   : "0" ((USItype) (ah)),					\
   365 @@ -902,8 +902,8 @@
   366    ({union {UDItype __ll;						\
   367  	   struct {USItype __h, __l;} __i;				\
   368  	  } __xx;							\
   369 -  __asm__ ("movw %1,%R0
   370 -	uemul %2,%0"							\
   371 +  __asm__ ("movw %1,%R0\n"          \
   372 +	"uemul %2,%0"							\
   373  	   : "=&r" (__xx.__ll)						\
   374  	   : "g" ((USItype) (u)),					\
   375  	     "g" ((USItype) (v)));					\
   376 @@ -912,8 +912,8 @@
   377  
   378  #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
   379  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   380 -  __asm__ ("a %1,%5
   381 -	ae %0,%3"							\
   382 +  __asm__ ("a %1,%5\n"                \
   383 +	"ae %0,%3"							\
   384  	   : "=r" ((USItype) (sh)),					\
   385  	     "=&r" ((USItype) (sl))					\
   386  	   : "%0" ((USItype) (ah)),					\
   387 @@ -921,8 +921,8 @@
   388  	     "%1" ((USItype) (al)),					\
   389  	     "r" ((USItype) (bl)))
   390  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   391 -  __asm__ ("s %1,%5
   392 -	se %0,%3"							\
   393 +  __asm__ ("s %1,%5\n"                \
   394 +	"se %0,%3"							\
   395  	   : "=r" ((USItype) (sh)),					\
   396  	     "=&r" ((USItype) (sl))					\
   397  	   : "0" ((USItype) (ah)),					\
   398 @@ -933,26 +933,26 @@
   399    do {									\
   400      USItype __m0 = (m0), __m1 = (m1);					\
   401      __asm__ (								\
   402 -       "s	r2,r2
   403 -	mts	r10,%2
   404 -	m	r2,%3
   405 -	m	r2,%3
   406 -	m	r2,%3
   407 -	m	r2,%3
   408 -	m	r2,%3
   409 -	m	r2,%3
   410 -	m	r2,%3
   411 -	m	r2,%3
   412 -	m	r2,%3
   413 -	m	r2,%3
   414 -	m	r2,%3
   415 -	m	r2,%3
   416 -	m	r2,%3
   417 -	m	r2,%3
   418 -	m	r2,%3
   419 -	m	r2,%3
   420 -	cas	%0,r2,r0
   421 -	mfs	r10,%1"							\
    422 +       "s	r2,r2\n"                          \
   423 +	"mts	r10,%2\n"                       \
   424 +	"m	r2,%3\n"                          \
   425 +	"m	r2,%3\n"                          \
   426 +	"m	r2,%3\n"                          \
   427 +	"m	r2,%3\n"                          \
   428 +	"m	r2,%3\n"                          \
   429 +	"m	r2,%3\n"                          \
   430 +	"m	r2,%3\n"                          \
   431 +	"m	r2,%3\n"                          \
   432 +	"m	r2,%3\n"                          \
   433 +	"m	r2,%3\n"                          \
   434 +	"m	r2,%3\n"                          \
   435 +	"m	r2,%3\n"                          \
   436 +	"m	r2,%3\n"                          \
   437 +	"m	r2,%3\n"                          \
   438 +	"m	r2,%3\n"                          \
   439 +	"m	r2,%3\n"                          \
   440 +	"cas	%0,r2,r0\n"                     \
   441 +	"mfs	r10,%1"							\
   442  	     : "=r" ((USItype) (ph)),					\
   443  	       "=r" ((USItype) (pl))					\
   444  	     : "%r" (__m0),						\
   445 @@ -982,9 +982,9 @@
   446  #if defined (__sh2__) && W_TYPE_SIZE == 32
   447  #define umul_ppmm(w1, w0, u, v) \
   448    __asm__ (								\
   449 -       "dmulu.l	%2,%3
   450 -	sts	macl,%1
   451 -	sts	mach,%0"						\
   452 +       "dmulu.l	%2,%3\n"            \
   453 +	"sts	macl,%1\n"                  \
   454 +	"sts	mach,%0"						\
   455  	   : "=r" ((USItype)(w1)),					\
   456  	     "=r" ((USItype)(w0))					\
   457  	   : "r" ((USItype)(u)),					\
   458 @@ -996,8 +996,8 @@
   459  #if defined (__sparc__) && !defined(__arch64__) \
   460      && !defined(__sparcv9) && W_TYPE_SIZE == 32
   461  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   462 -  __asm__ ("addcc %r4,%5,%1
   463 -	addx %r2,%3,%0"							\
   464 +  __asm__ ("addcc %r4,%5,%1\n"        \
   465 +	"addx %r2,%3,%0"							\
   466  	   : "=r" ((USItype) (sh)),					\
   467  	     "=&r" ((USItype) (sl))					\
   468  	   : "%rJ" ((USItype) (ah)),					\
   469 @@ -1006,8 +1006,8 @@
   470  	     "rI" ((USItype) (bl))					\
   471  	   __CLOBBER_CC)
   472  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   473 -  __asm__ ("subcc %r4,%5,%1
   474 -	subx %r2,%3,%0"							\
   475 +  __asm__ ("subcc %r4,%5,%1\n"        \
   476 +	"subx %r2,%3,%0"							\
   477  	   : "=r" ((USItype) (sh)),					\
   478  	     "=&r" ((USItype) (sl))					\
   479  	   : "rJ" ((USItype) (ah)),					\
   480 @@ -1040,45 +1040,45 @@
   481  	   : "r" ((USItype) (u)),					\
   482  	     "r" ((USItype) (v)))
   483  #define udiv_qrnnd(q, r, n1, n0, d) \
   484 -  __asm__ ("! Inlined udiv_qrnnd
   485 -	wr	%%g0,%2,%%y	! Not a delayed write for sparclite
   486 -	tst	%%g0
   487 -	divscc	%3,%4,%%g1
   488 -	divscc	%%g1,%4,%%g1
   489 -	divscc	%%g1,%4,%%g1
   490 -	divscc	%%g1,%4,%%g1
   491 -	divscc	%%g1,%4,%%g1
   492 -	divscc	%%g1,%4,%%g1
   493 -	divscc	%%g1,%4,%%g1
   494 -	divscc	%%g1,%4,%%g1
   495 -	divscc	%%g1,%4,%%g1
   496 -	divscc	%%g1,%4,%%g1
   497 -	divscc	%%g1,%4,%%g1
   498 -	divscc	%%g1,%4,%%g1
   499 -	divscc	%%g1,%4,%%g1
   500 -	divscc	%%g1,%4,%%g1
   501 -	divscc	%%g1,%4,%%g1
   502 -	divscc	%%g1,%4,%%g1
   503 -	divscc	%%g1,%4,%%g1
   504 -	divscc	%%g1,%4,%%g1
   505 -	divscc	%%g1,%4,%%g1
   506 -	divscc	%%g1,%4,%%g1
   507 -	divscc	%%g1,%4,%%g1
   508 -	divscc	%%g1,%4,%%g1
   509 -	divscc	%%g1,%4,%%g1
   510 -	divscc	%%g1,%4,%%g1
   511 -	divscc	%%g1,%4,%%g1
   512 -	divscc	%%g1,%4,%%g1
   513 -	divscc	%%g1,%4,%%g1
   514 -	divscc	%%g1,%4,%%g1
   515 -	divscc	%%g1,%4,%%g1
   516 -	divscc	%%g1,%4,%%g1
   517 -	divscc	%%g1,%4,%%g1
   518 -	divscc	%%g1,%4,%0
   519 -	rd	%%y,%1
   520 -	bl,a 1f
   521 -	add	%1,%4,%1
   522 -1:	! End of inline udiv_qrnnd"					\
   523 +  __asm__ ("! Inlined udiv_qrnnd\n"                      \
   524 +	"wr	%%g0,%2,%%y	! Not a delayed write for sparclite\n" \
   525 +	"tst	%%g0\n"                                          \
   526 +	"divscc	%3,%4,%%g1\n"                                  \
   527 +	"divscc	%%g1,%4,%%g1\n"                                \
   528 +	"divscc	%%g1,%4,%%g1\n"                                \
   529 +	"divscc	%%g1,%4,%%g1\n"                                \
   530 +	"divscc	%%g1,%4,%%g1\n"                                \
   531 +	"divscc	%%g1,%4,%%g1\n"                                \
   532 +	"divscc	%%g1,%4,%%g1\n"                                \
   533 +	"divscc	%%g1,%4,%%g1\n"                                \
   534 +	"divscc	%%g1,%4,%%g1\n"                                \
   535 +	"divscc	%%g1,%4,%%g1\n"                                \
   536 +	"divscc	%%g1,%4,%%g1\n"                                \
   537 +	"divscc	%%g1,%4,%%g1\n"                                \
   538 +	"divscc	%%g1,%4,%%g1\n"                                \
   539 +	"divscc	%%g1,%4,%%g1\n"                                \
   540 +	"divscc	%%g1,%4,%%g1\n"                                \
   541 +	"divscc	%%g1,%4,%%g1\n"                                \
   542 +	"divscc	%%g1,%4,%%g1\n"                                \
   543 +	"divscc	%%g1,%4,%%g1\n"                                \
   544 +	"divscc	%%g1,%4,%%g1\n"                                \
   545 +	"divscc	%%g1,%4,%%g1\n"                                \
   546 +	"divscc	%%g1,%4,%%g1\n"                                \
   547 +	"divscc	%%g1,%4,%%g1\n"                                \
   548 +	"divscc	%%g1,%4,%%g1\n"                                \
   549 +	"divscc	%%g1,%4,%%g1\n"                                \
   550 +	"divscc	%%g1,%4,%%g1\n"                                \
   551 +	"divscc	%%g1,%4,%%g1\n"                                \
   552 +	"divscc	%%g1,%4,%%g1\n"                                \
   553 +	"divscc	%%g1,%4,%%g1\n"                                \
   554 +	"divscc	%%g1,%4,%%g1\n"                                \
   555 +	"divscc	%%g1,%4,%%g1\n"                                \
   556 +	"divscc	%%g1,%4,%%g1\n"                                \
   557 +	"divscc	%%g1,%4,%0\n"                                  \
   558 +	"rd	%%y,%1\n"                                          \
   559 +	"bl,a 1f\n"                                            \
   560 +	"add	%1,%4,%1\n"                                      \
   561 +"1:	! End of inline udiv_qrnnd"					\
   562  	   : "=r" ((USItype) (q)),					\
   563  	     "=r" ((USItype) (r))					\
   564  	   : "r" ((USItype) (n1)),					\
   565 @@ -1099,46 +1099,46 @@
   566  /* SPARC without integer multiplication and divide instructions.
   567     (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
   568  #define umul_ppmm(w1, w0, u, v) \
   569 -  __asm__ ("! Inlined umul_ppmm
   570 -	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
   571 -	sra	%3,31,%%o5	! Don't move this insn
   572 -	and	%2,%%o5,%%o5	! Don't move this insn
   573 -	andcc	%%g0,0,%%g1	! Don't move this insn
   574 -	mulscc	%%g1,%3,%%g1
   575 -	mulscc	%%g1,%3,%%g1
   576 -	mulscc	%%g1,%3,%%g1
   577 -	mulscc	%%g1,%3,%%g1
   578 -	mulscc	%%g1,%3,%%g1
   579 -	mulscc	%%g1,%3,%%g1
   580 -	mulscc	%%g1,%3,%%g1
   581 -	mulscc	%%g1,%3,%%g1
   582 -	mulscc	%%g1,%3,%%g1
   583 -	mulscc	%%g1,%3,%%g1
   584 -	mulscc	%%g1,%3,%%g1
   585 -	mulscc	%%g1,%3,%%g1
   586 -	mulscc	%%g1,%3,%%g1
   587 -	mulscc	%%g1,%3,%%g1
   588 -	mulscc	%%g1,%3,%%g1
   589 -	mulscc	%%g1,%3,%%g1
   590 -	mulscc	%%g1,%3,%%g1
   591 -	mulscc	%%g1,%3,%%g1
   592 -	mulscc	%%g1,%3,%%g1
   593 -	mulscc	%%g1,%3,%%g1
   594 -	mulscc	%%g1,%3,%%g1
   595 -	mulscc	%%g1,%3,%%g1
   596 -	mulscc	%%g1,%3,%%g1
   597 -	mulscc	%%g1,%3,%%g1
   598 -	mulscc	%%g1,%3,%%g1
   599 -	mulscc	%%g1,%3,%%g1
   600 -	mulscc	%%g1,%3,%%g1
   601 -	mulscc	%%g1,%3,%%g1
   602 -	mulscc	%%g1,%3,%%g1
   603 -	mulscc	%%g1,%3,%%g1
   604 -	mulscc	%%g1,%3,%%g1
   605 -	mulscc	%%g1,%3,%%g1
   606 -	mulscc	%%g1,0,%%g1
   607 -	add	%%g1,%%o5,%0
   608 -	rd	%%y,%1"							\
   609 +  __asm__ ("! Inlined umul_ppmm\n"                         \
   610 +	"wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n" \
   611 +	"sra	%3,31,%%o5	! Don't move this insn\n"              \
   612 +	"and	%2,%%o5,%%o5	! Don't move this insn\n"            \
   613 +	"andcc	%%g0,0,%%g1	! Don't move this insn\n"            \
   614 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   615 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   616 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   617 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   618 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   619 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   620 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   621 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   622 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   623 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   624 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   625 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   626 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   627 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   628 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   629 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   630 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   631 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   632 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   633 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   634 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   635 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   636 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   637 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   638 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   639 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   640 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   641 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   642 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   643 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   644 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   645 +	"mulscc	%%g1,%3,%%g1\n"                                  \
   646 +	"mulscc	%%g1,0,%%g1\n"                                   \
   647 +	"add	%%g1,%%o5,%0\n"                                    \
   648 +	"rd	%%y,%1"							\
   649  	   : "=r" ((USItype) (w1)),					\
   650  	     "=r" ((USItype) (w0))					\
   651  	   : "%rI" ((USItype) (u)),					\
   652 @@ -1148,30 +1148,30 @@
   653  /* It's quite necessary to add this much assembler for the sparc.
   654     The default udiv_qrnnd (in C) is more than 10 times slower!  */
   655  #define udiv_qrnnd(q, r, n1, n0, d) \
   656 -  __asm__ ("! Inlined udiv_qrnnd
   657 -	mov	32,%%g1
   658 -	subcc	%1,%2,%%g0
   659 -1:	bcs	5f
   660 -	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
   661 -	sub	%1,%2,%1	! this kills msb of n
   662 -	addx	%1,%1,%1	! so this can't give carry
   663 -	subcc	%%g1,1,%%g1
   664 -2:	bne	1b
   665 -	 subcc	%1,%2,%%g0
   666 -	bcs	3f
   667 -	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
   668 -	b	3f
   669 -	 sub	%1,%2,%1	! this kills msb of n
   670 -4:	sub	%1,%2,%1
   671 -5:	addxcc	%1,%1,%1
   672 -	bcc	2b
   673 -	 subcc	%%g1,1,%%g1
   674 -! Got carry from n.  Subtract next step to cancel this carry.
   675 -	bne	4b
   676 -	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb
   677 -	sub	%1,%2,%1
   678 -3:	xnor	%0,0,%0
   679 -	! End of inline udiv_qrnnd"					\
   680 +  __asm__ ("! Inlined udiv_qrnnd\n"                                 \
   681 +	"mov	32,%%g1\n"                                                  \
   682 +	"subcc	%1,%2,%%g0\n"                                             \
   683 +"1:	bcs	5f\n"                                                       \
   684 +	 "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"              \
   685 +	"sub	%1,%2,%1	! this kills msb of n\n"                          \
   686 +	"addx	%1,%1,%1	! so this can't give carry\n"                     \
   687 +	"subcc	%%g1,1,%%g1\n"                                            \
   688 +"2:	bne	1b\n"                                                       \
   689 +	 "subcc	%1,%2,%%g0\n"                                             \
   690 +	"bcs	3f\n"                                                       \
   691 +	 "addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"              \
   692 +	"b	3f\n"                                                         \
   693 +	 "sub	%1,%2,%1	! this kills msb of n\n"                          \
   694 +"4:	sub	%1,%2,%1\n"                                                 \
   695 +"5:	addxcc	%1,%1,%1\n"                                             \
   696 +	"bcc	2b\n"                                                       \
   697 +	 "subcc	%%g1,1,%%g1\n"                                            \
   698 +"! Got carry from n.  Subtract next step to cancel this carry.\n"   \
   699 +	"bne	4b\n"                                                       \
   700 +	 "addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n"              \
   701 +	"sub	%1,%2,%1\n"                                                 \
   702 +"3:	xnor	%0,0,%0\n"                                                \
   703 +	"! End of inline udiv_qrnnd"					\
   704  	   : "=&r" ((USItype) (q)),					\
   705  	     "=&r" ((USItype) (r))					\
   706  	   : "r" ((USItype) (d)),					\
   707 @@ -1185,11 +1185,11 @@
   708  #if ((defined (__sparc__) && defined (__arch64__)) \
   709       || defined (__sparcv9)) && W_TYPE_SIZE == 64
   710  #define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
   711 -  __asm__ ("addcc %r4,%5,%1
   712 -  	    add %r2,%3,%0
   713 -  	    bcs,a,pn %%xcc, 1f
   714 -  	    add %0, 1, %0
   715 -  	    1:"								\
   716 +  __asm__ ("addcc %r4,%5,%1\n"            \
   717 +  	    "add %r2,%3,%0\n"                 \
   718 +  	    "bcs,a,pn %%xcc, 1f\n"            \
   719 +  	    "add %0, 1, %0\n"                 \
   720 +  	    "1:"								\
   721  	   : "=r" ((UDItype)(sh)),				      	\
   722  	     "=&r" ((UDItype)(sl))				      	\
   723  	   : "%rJ" ((UDItype)(ah)),				     	\
   724 @@ -1199,11 +1199,11 @@
   725  	   __CLOBBER_CC)
   726  
   727  #define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
   728 -  __asm__ ("subcc %r4,%5,%1
   729 -  	    sub %r2,%3,%0
   730 -  	    bcs,a,pn %%xcc, 1f
   731 -  	    sub %0, 1, %0
   732 -  	    1:"								\
   733 +  __asm__ ("subcc %r4,%5,%1\n"            \
   734 +  	    "sub %r2,%3,%0\n"                 \
   735 +  	    "bcs,a,pn %%xcc, 1f\n"            \
   736 +  	    "sub %0, 1, %0\n"                 \
   737 +  	    "1:"								\
   738  	   : "=r" ((UDItype)(sh)),				      	\
   739  	     "=&r" ((UDItype)(sl))				      	\
   740  	   : "rJ" ((UDItype)(ah)),				     	\
   741 @@ -1216,27 +1216,27 @@
   742    do {									\
   743  	  UDItype tmp1, tmp2, tmp3, tmp4;				\
   744  	  __asm__ __volatile__ (					\
   745 -		   "srl %7,0,%3
   746 -		    mulx %3,%6,%1
   747 -		    srlx %6,32,%2
   748 -		    mulx %2,%3,%4
   749 -		    sllx %4,32,%5
   750 -		    srl %6,0,%3
   751 -		    sub %1,%5,%5
   752 -		    srlx %5,32,%5
   753 -		    addcc %4,%5,%4
   754 -		    srlx %7,32,%5
   755 -		    mulx %3,%5,%3
   756 -		    mulx %2,%5,%5
   757 -		    sethi %%hi(0x80000000),%2
   758 -		    addcc %4,%3,%4
   759 -		    srlx %4,32,%4
   760 -		    add %2,%2,%2
   761 -		    movcc %%xcc,%%g0,%2
   762 -		    addcc %5,%4,%5
   763 -		    sllx %3,32,%3
   764 -		    add %1,%3,%1
   765 -		    add %5,%2,%0"					\
   766 +		   "srl %7,0,%3\n"                     \
   767 +		    "mulx %3,%6,%1\n"                  \
   768 +		    "srlx %6,32,%2\n"                  \
   769 +		    "mulx %2,%3,%4\n"                  \
   770 +		    "sllx %4,32,%5\n"                  \
   771 +		    "srl %6,0,%3\n"                    \
   772 +		    "sub %1,%5,%5\n"                   \
   773 +		    "srlx %5,32,%5\n"                  \
   774 +		    "addcc %4,%5,%4\n"                 \
   775 +		    "srlx %7,32,%5\n"                  \
   776 +		    "mulx %3,%5,%3\n"                  \
   777 +		    "mulx %2,%5,%5\n"                  \
   778 +		    "sethi %%hi(0x80000000),%2\n"      \
   779 +		    "addcc %4,%3,%4\n"                 \
   780 +		    "srlx %4,32,%4\n"                  \
   781 +		    "add %2,%2,%2\n"                   \
   782 +		    "movcc %%xcc,%%g0,%2\n"            \
   783 +		    "addcc %5,%4,%5\n"                 \
   784 +		    "sllx %3,32,%3\n"                  \
   785 +		    "add %1,%3,%1\n"                   \
   786 +		    "add %5,%2,%0"					\
   787  	   : "=r" ((UDItype)(wh)),					\
   788  	     "=&r" ((UDItype)(wl)),					\
   789  	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4)	\
   790 @@ -1250,8 +1250,8 @@
   791  
   792  #if defined (__vax__) && W_TYPE_SIZE == 32
   793  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   794 -  __asm__ ("addl2 %5,%1
   795 -	adwc %3,%0"							\
   796 +  __asm__ ("addl2 %5,%1\n"            \
   797 +	"adwc %3,%0"							\
   798  	   : "=g" ((USItype) (sh)),					\
   799  	     "=&g" ((USItype) (sl))					\
   800  	   : "%0" ((USItype) (ah)),					\
   801 @@ -1259,8 +1259,8 @@
   802  	     "%1" ((USItype) (al)),					\
   803  	     "g" ((USItype) (bl)))
   804  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   805 -  __asm__ ("subl2 %5,%1
   806 -	sbwc %3,%0"							\
   807 +  __asm__ ("subl2 %5,%1\n"            \
   808 +	"sbwc %3,%0"							\
   809  	   : "=g" ((USItype) (sh)),					\
   810  	     "=&g" ((USItype) (sl))					\
   811  	   : "0" ((USItype) (ah)),					\