patches/glibc/2.1.3/glibc-2.1.3-allow-gcc3-longlong.patch
author "Yann E. MORIN" <yann.morin.1998@anciens.enib.fr>
Sat Jul 28 21:34:41 2007 +0000 (2007-07-28)
changeset 301 2be7232a73ac
permissions -rw-r--r--
Bump version to 0.2.2.
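
What this patch does: GCC 2.x accepted multi-line __asm__ templates written as a
single string literal containing raw newlines; GCC 3 no longer accepts such
literals, so every asm template in glibc-2.1.3's stdlib/longlong.h is rewritten,
one architecture block at a time, as one quoted string per instruction, each
ending in "\n", with the compiler's adjacent-string-literal concatenation
rebuilding the identical template.  The sketch below is NOT part of the patch;
it is a minimal illustration of the resulting style, modelled on the i386
add_ssaaaa macro.  The lvalue casts used in longlong.h are dropped here so the
example stays self-contained and should compile on i386/x86_64 with a current
GCC or Clang.

    /* Illustrative only -- mirrors the post-patch i386 add_ssaaaa style. */
    typedef unsigned int USItype;

    /* {sh,sl} = {ah,al} + {bh,bl}, 64-bit add built from 32-bit add/adc.  */
    #define add_ssaaaa(sh, sl, ah, al, bh, bl)                          \
      __asm__ ("addl %5,%1\n"  /* one string literal per instruction */ \
               "adcl %3,%0"    /* ...concatenated by the compiler    */ \
               : "=r" (sh),                                             \
                 "=&r" (sl)                                             \
               : "%0" (ah),                                             \
                 "g" (bh),                                              \
                 "%1" (al),                                             \
                 "g" (bl))

    int main(void)
    {
      USItype hi, lo;
      /* 0x1ffffffff + 1 = 0x200000000, so expect hi == 2 and lo == 0.  */
      add_ssaaaa(hi, lo, 1u, 0xffffffffu, 0u, 1u);
      return (hi == 2u && lo == 0u) ? 0 : 1;
    }

Building that with something like "gcc -Wall -O2 example.c" and checking the
exit status is a quick way to confirm that the split-string form assembles to
the same two-instruction carry-propagating sequence as the old multi-line form.
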
     1 --- glibc-2.1.3/stdlib/longlong.h.old	2004-03-05 14:49:14.000000000 -0800
     2 +++ glibc-2.1.3/stdlib/longlong.h	2004-03-05 15:19:26.000000000 -0800
     3 @@ -106,8 +106,8 @@
     4  
     5  #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
     6  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
     7 -  __asm__ ("add %1,%4,%5
     8 -	addc %0,%2,%3"							\
     9 +  __asm__ ("add %1,%4,%5\n"						\
    10 +	"addc %0,%2,%3"							\
    11  	   : "=r" ((USItype)(sh)),					\
    12  	    "=&r" ((USItype)(sl))					\
    13  	   : "%r" ((USItype)(ah)),					\
    14 @@ -115,8 +115,8 @@
    15  	     "%r" ((USItype)(al)),					\
    16  	     "rI" ((USItype)(bl)))
    17  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    18 -  __asm__ ("sub %1,%4,%5
    19 -	subc %0,%2,%3"							\
    20 +  __asm__ ("sub %1,%4,%5\n"						\
    21 +	"subc %0,%2,%3"							\
    22  	   : "=r" ((USItype)(sh)),					\
    23  	     "=&r" ((USItype)(sl))					\
    24  	   : "r" ((USItype)(ah)),					\
    25 @@ -173,8 +173,8 @@
    26  
    27  #if defined (__arm__) && W_TYPE_SIZE == 32
    28  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    29 -  __asm__ ("adds	%1, %4, %5
    30 -	adc	%0, %2, %3"						\
    31 +  __asm__ ("adds	%1, %4, %5\n"					\
    32 +	"adc	%0, %2, %3"						\
    33  	   : "=r" ((USItype)(sh)),					\
    34  	     "=&r" ((USItype)(sl))					\
    35  	   : "%r" ((USItype)(ah)),					\
    36 @@ -182,8 +182,8 @@
    37  	     "%r" ((USItype)(al)),					\
    38  	     "rI" ((USItype)(bl)))
    39  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    40 -  __asm__ ("subs	%1, %4, %5
    41 -	sbc	%0, %2, %3"						\
    42 +  __asm__ ("subs	%1, %4, %5\n"					\
    43 +	"sbc	%0, %2, %3"						\
    44  	   : "=r" ((USItype)(sh)),					\
    45  	     "=&r" ((USItype)(sl))					\
    46  	   : "r" ((USItype)(ah)),					\
    47 @@ -192,19 +192,19 @@
    48  	     "rI" ((USItype)(bl)))
    49  #if 0
    50  #define umul_ppmm(xh, xl, a, b) \
    51 -  __asm__ ("%@ Inlined umul_ppmm
    52 -	mov	%|r0, %2, lsr #16
    53 -	mov	%|r2, %3, lsr #16
    54 -	bic	%|r1, %2, %|r0, lsl #16
    55 -	bic	%|r2, %3, %|r2, lsl #16
    56 -	mul	%1, %|r1, %|r2
    57 -	mul	%|r2, %|r0, %|r2
    58 -	mul	%|r1, %0, %|r1
    59 -	mul	%0, %|r0, %0
    60 -	adds	%|r1, %|r2, %|r1
    61 -	addcs	%0, %0, #65536
    62 -	adds	%1, %1, %|r1, lsl #16
    63 -	adc	%0, %0, %|r1, lsr #16"					\
    64 +  __asm__ ("%@ Inlined umul_ppmm\n"					\
    65 +	"mov	%|r0, %2, lsr #16\n"					\
    66 +	"mov	%|r2, %3, lsr #16\n"					\
    67 +	"bic	%|r1, %2, %|r0, lsl #16\n"				\
    68 +	"bic	%|r2, %3, %|r2, lsl #16\n"				\
    69 +	"mul	%1, %|r1, %|r2\n"					\
    70 +	"mul	%|r2, %|r0, %|r2\n"					\
    71 +	"mul	%|r1, %0, %|r1\n"					\
    72 +	"mul	%0, %|r0, %0\n"						\
    73 +	"adds	%|r1, %|r2, %|r1\n"					\
    74 +	"addcs	%0, %0, #65536\n"					\
    75 +	"adds	%1, %1, %|r1, lsl #16\n"				\
    76 +	"adc	%0, %0, %|r1, lsr #16"					\
    77  	   : "=&r" ((USItype)(xh)),					\
    78  	     "=r" ((USItype)(xl))					\
    79  	   : "r" ((USItype)(a)),					\
    80 @@ -245,8 +245,8 @@
    81  
    82  #if defined (__gmicro__) && W_TYPE_SIZE == 32
    83  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
    84 -  __asm__ ("add.w %5,%1
    85 -	addx %3,%0"							\
    86 +  __asm__ ("add.w %5,%1\n"						\
    87 +	"addx %3,%0"							\
    88  	   : "=g" ((USItype)(sh)),					\
    89  	     "=&g" ((USItype)(sl))					\
    90  	   : "%0" ((USItype)(ah)),					\
    91 @@ -254,8 +254,8 @@
    92  	     "%1" ((USItype)(al)),					\
    93  	     "g" ((USItype)(bl)))
    94  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
    95 -  __asm__ ("sub.w %5,%1
    96 -	subx %3,%0"							\
    97 +  __asm__ ("sub.w %5,%1\n"						\
    98 +	"subx %3,%0"							\
    99  	   : "=g" ((USItype)(sh)),					\
   100  	     "=&g" ((USItype)(sl))					\
   101  	   : "0" ((USItype)(ah)),					\
   102 @@ -284,8 +284,8 @@
   103  
   104  #if defined (__hppa) && W_TYPE_SIZE == 32
   105  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   106 -  __asm__ ("add %4,%5,%1
   107 -	addc %2,%3,%0"							\
   108 +  __asm__ ("add %4,%5,%1\n"						\
   109 +	"addc %2,%3,%0"							\
   110  	   : "=r" ((USItype)(sh)),					\
   111  	     "=&r" ((USItype)(sl))					\
   112  	   : "%rM" ((USItype)(ah)),					\
   113 @@ -293,8 +293,8 @@
   114  	     "%rM" ((USItype)(al)),					\
   115  	     "rM" ((USItype)(bl)))
   116  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   117 -  __asm__ ("sub %4,%5,%1
   118 -	subb %2,%3,%0"							\
   119 +  __asm__ ("sub %4,%5,%1\n"						\
   120 +	"subb %2,%3,%0"							\
   121  	   : "=r" ((USItype)(sh)),					\
   122  	     "=&r" ((USItype)(sl))					\
   123  	   : "rM" ((USItype)(ah)),					\
   124 @@ -332,22 +332,22 @@
   125    do {									\
   126      USItype __tmp;							\
   127      __asm__ (								\
   128 -       "ldi		1,%0
   129 -	extru,=		%1,15,16,%%r0		; Bits 31..16 zero?
   130 -	extru,tr	%1,15,16,%1		; No.  Shift down, skip add.
   131 -	ldo		16(%0),%0		; Yes.  Perform add.
   132 -	extru,=		%1,23,8,%%r0		; Bits 15..8 zero?
   133 -	extru,tr	%1,23,8,%1		; No.  Shift down, skip add.
   134 -	ldo		8(%0),%0		; Yes.  Perform add.
   135 -	extru,=		%1,27,4,%%r0		; Bits 7..4 zero?
   136 -	extru,tr	%1,27,4,%1		; No.  Shift down, skip add.
   137 -	ldo		4(%0),%0		; Yes.  Perform add.
   138 -	extru,=		%1,29,2,%%r0		; Bits 3..2 zero?
   139 -	extru,tr	%1,29,2,%1		; No.  Shift down, skip add.
   140 -	ldo		2(%0),%0		; Yes.  Perform add.
   141 -	extru		%1,30,1,%1		; Extract bit 1.
   142 -	sub		%0,%1,%0		; Subtract it.
   143 -	" : "=r" (count), "=r" (__tmp) : "1" (x));			\
   144 +       "ldi		1,%0\n"						\
   145 +	"extru,=	%1,15,16,%%r0		; Bits 31..16 zero?\n"			\
   146 +	"extru,tr	%1,15,16,%1		; No.  Shift down, skip add.\n"		\
   147 +	"ldo		16(%0),%0		; Yes.  Perform add.\n"			\
   148 +	"extru,=	%1,23,8,%%r0		; Bits 15..8 zero?\n"			\
   149 +	"extru,tr	%1,23,8,%1		; No.  Shift down, skip add.\n"		\
   150 +	"ldo		8(%0),%0		; Yes.  Perform add.\n"			\
   151 +	"extru,=	%1,27,4,%%r0		; Bits 7..4 zero?\n"			\
   152 +	"extru,tr	%1,27,4,%1		; No.  Shift down, skip add.\n"		\
   153 +	"ldo		4(%0),%0		; Yes.  Perform add.\n"			\
   154 +	"extru,=	%1,29,2,%%r0		; Bits 3..2 zero?\n"			\
   155 +	"extru,tr	%1,29,2,%1		; No.  Shift down, skip add.\n"		\
   156 +	"ldo		2(%0),%0		; Yes.  Perform add.\n"			\
   157 +	"extru		%1,30,1,%1		; Extract bit 1.\n"			\
   158 +	"sub		%0,%1,%0		; Subtract it.\n"			\
   159 +	: "=r" (count), "=r" (__tmp) : "1" (x));			\
   160    } while (0)
   161  #endif /* hppa */
   162  
   163 @@ -394,8 +394,8 @@
   164  
   165  #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
   166  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   167 -  __asm__ ("addl %5,%1
   168 -	adcl %3,%0"							\
   169 +  __asm__ ("addl %5,%1\n"						\
   170 +	"adcl %3,%0"							\
   171  	   : "=r" ((USItype)(sh)),					\
   172  	     "=&r" ((USItype)(sl))					\
   173  	   : "%0" ((USItype)(ah)),					\
   174 @@ -403,8 +403,8 @@
   175  	     "%1" ((USItype)(al)),					\
   176  	     "g" ((USItype)(bl)))
   177  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   178 -  __asm__ ("subl %5,%1
   179 -	sbbl %3,%0"							\
   180 +  __asm__ ("subl %5,%1\n"						\
   181 +	"sbbl %3,%0"							\
   182  	   : "=r" ((USItype)(sh)),					\
   183  	     "=&r" ((USItype)(sl))					\
   184  	   : "0" ((USItype)(ah)),					\
   185 @@ -516,8 +516,8 @@
   186  
   187  #if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
   188  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   189 -  __asm__ ("add%.l %5,%1
   190 -	addx%.l %3,%0"							\
   191 +  __asm__ ("add%.l %5,%1\n"						\
   192 +	"addx%.l %3,%0"							\
   193  	   : "=d" ((USItype)(sh)),					\
   194  	     "=&d" ((USItype)(sl))					\
   195  	   : "%0" ((USItype)(ah)),					\
   196 @@ -525,8 +525,8 @@
   197  	     "%1" ((USItype)(al)),					\
   198  	     "g" ((USItype)(bl)))
   199  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   200 -  __asm__ ("sub%.l %5,%1
   201 -	subx%.l %3,%0"							\
   202 +  __asm__ ("sub%.l %5,%1\n"						\
   203 +	"subx%.l %3,%0"							\
   204  	   : "=d" ((USItype)(sh)),					\
   205  	     "=&d" ((USItype)(sl))					\
   206  	   : "0" ((USItype)(ah)),					\
   207 @@ -564,28 +564,28 @@
   208  #else /* not mc68020 */
   209  #define umul_ppmm(xh, xl, a, b) \
   210    do { USItype __umul_tmp1, __umul_tmp2;				\
   211 -	__asm__ ("| Inlined umul_ppmm
   212 -	move%.l	%5,%3
   213 -	move%.l	%2,%0
   214 -	move%.w	%3,%1
   215 -	swap	%3
   216 -	swap	%0
   217 -	mulu	%2,%1
   218 -	mulu	%3,%0
   219 -	mulu	%2,%3
   220 -	swap	%2
   221 -	mulu	%5,%2
   222 -	add%.l	%3,%2
   223 -	jcc	1f
   224 -	add%.l	%#0x10000,%0
   225 -1:	move%.l	%2,%3
   226 -	clr%.w	%2
   227 -	swap	%2
   228 -	swap	%3
   229 -	clr%.w	%3
   230 -	add%.l	%3,%1
   231 -	addx%.l	%2,%0
   232 -	| End inlined umul_ppmm"					\
   233 +	__asm__ ("| Inlined umul_ppmm\n"				\
   234 +	"move%.l	%5,%3\n"					\
   235 +	"move%.l	%2,%0\n"					\
   236 +	"move%.w	%3,%1\n"					\
   237 +	"swap	%3\n"							\
   238 +	"swap	%0\n"							\
   239 +	"mulu	%2,%1\n"						\
   240 +	"mulu	%3,%0\n"						\
   241 +	"mulu	%2,%3\n"						\
   242 +	"swap	%2\n"							\
   243 +	"mulu	%5,%2\n"						\
   244 +	"add%.l	%3,%2\n"						\
   245 +	"jcc	1f\n"							\
   246 +	"add%.l	%#0x10000,%0\n"						\
   247 +"1:	move%.l	%2,%3\n"						\
   248 +	"clr%.w	%2\n"							\
   249 +	"swap	%2\n"							\
   250 +	"swap	%3\n"							\
   251 +	"clr%.w	%3\n"							\
   252 +	"add%.l	%3,%1\n"						\
   253 +	"addx%.l	%2,%0\n"					\
   254 +	"| End inlined umul_ppmm"					\
   255  	      : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)),		\
   256  		"=d" (__umul_tmp1), "=&d" (__umul_tmp2)			\
   257  	      : "%2" ((USItype)(a)), "d" ((USItype)(b)));		\
   258 @@ -597,8 +597,8 @@
   259  
   260  #if defined (__m88000__) && W_TYPE_SIZE == 32
   261  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   262 -  __asm__ ("addu.co %1,%r4,%r5
   263 -	addu.ci %0,%r2,%r3"						\
   264 +  __asm__ ("addu.co %1,%r4,%r5\n"					\
   265 +	"addu.ci %0,%r2,%r3"						\
   266  	   : "=r" ((USItype)(sh)),					\
   267  	     "=&r" ((USItype)(sl))					\
   268  	   : "%rJ" ((USItype)(ah)),					\
   269 @@ -606,8 +606,8 @@
   270  	     "%rJ" ((USItype)(al)),					\
   271  	     "rJ" ((USItype)(bl)))
   272  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   273 -  __asm__ ("subu.co %1,%r4,%r5
   274 -	subu.ci %0,%r2,%r3"						\
   275 +  __asm__ ("subu.co %1,%r4,%r5\n"					\
   276 +	"subu.ci %0,%r2,%r3"						\
   277  	   : "=r" ((USItype)(sh)),					\
   278  	     "=&r" ((USItype)(sl))					\
   279  	   : "rJ" ((USItype)(ah)),					\
   280 @@ -665,9 +665,9 @@
   281  	     "d" ((USItype)(v)))
   282  #else
   283  #define umul_ppmm(w1, w0, u, v) \
   284 -  __asm__ ("multu %2,%3
   285 -	mflo %0
   286 -	mfhi %1"							\
   287 +  __asm__ ("multu %2,%3\n"						\
   288 +	"mflo %0\n"							\
   289 +	"mfhi %1"							\
   290  	   : "=d" ((USItype)(w0)),					\
   291  	     "=d" ((USItype)(w1))					\
   292  	   : "d" ((USItype)(u)),					\
   293 @@ -687,9 +687,9 @@
   294  	     "d" ((UDItype)(v)))
   295  #else
   296  #define umul_ppmm(w1, w0, u, v) \
   297 -  __asm__ ("dmultu %2,%3
   298 -	mflo %0
   299 -	mfhi %1"							\
   300 +  __asm__ ("dmultu %2,%3\n"						\
   301 +	"mflo %0\n"							\
   302 +	"mfhi %1"							\
   303  	   : "=d" ((UDItype)(w0)),					\
   304  	     "=d" ((UDItype)(w1))					\
   305  	   : "d" ((UDItype)(u)),					\
   306 @@ -857,8 +857,8 @@
   307  
   308  #if defined (__pyr__) && W_TYPE_SIZE == 32
   309  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   310 -  __asm__ ("addw	%5,%1
   311 -	addwc	%3,%0"							\
   312 +  __asm__ ("addw	%5,%1\n"					\
   313 +	"addwc	%3,%0"							\
   314  	   : "=r" ((USItype)(sh)),					\
   315  	     "=&r" ((USItype)(sl))					\
   316  	   : "%0" ((USItype)(ah)),					\
   317 @@ -866,8 +866,8 @@
   318  	     "%1" ((USItype)(al)),					\
   319  	     "g" ((USItype)(bl)))
   320  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   321 -  __asm__ ("subw	%5,%1
   322 -	subwb	%3,%0"							\
   323 +  __asm__ ("subw	%5,%1\n"					\
   324 +	"subwb	%3,%0"							\
   325  	   : "=r" ((USItype)(sh)),					\
   326  	     "=&r" ((USItype)(sl))					\
   327  	   : "0" ((USItype)(ah)),					\
   328 @@ -879,8 +879,8 @@
   329    ({union {UDItype __ll;						\
   330  	   struct {USItype __h, __l;} __i;				\
   331  	  } __xx;							\
   332 -  __asm__ ("movw %1,%R0
   333 -	uemul %2,%0"							\
   334 +  __asm__ ("movw %1,%R0\n"						\
   335 +	"uemul %2,%0"							\
   336  	   : "=&r" (__xx.__ll)						\
   337  	   : "g" ((USItype) (u)),					\
   338  	     "g" ((USItype)(v)));					\
   339 @@ -889,8 +889,8 @@
   340  
   341  #if defined (__ibm032__) /* RT/ROMP */  && W_TYPE_SIZE == 32
   342  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   343 -  __asm__ ("a %1,%5
   344 -	ae %0,%3"							\
   345 +  __asm__ ("a %1,%5\n"							\
   346 +	"ae %0,%3"							\
   347  	   : "=r" ((USItype)(sh)),					\
   348  	     "=&r" ((USItype)(sl))					\
   349  	   : "%0" ((USItype)(ah)),					\
   350 @@ -898,8 +898,8 @@
   351  	     "%1" ((USItype)(al)),					\
   352  	     "r" ((USItype)(bl)))
   353  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   354 -  __asm__ ("s %1,%5
   355 -	se %0,%3"							\
   356 +  __asm__ ("s %1,%5\n"							\
   357 +	"se %0,%3"							\
   358  	   : "=r" ((USItype)(sh)),					\
   359  	     "=&r" ((USItype)(sl))					\
   360  	   : "0" ((USItype)(ah)),					\
   361 @@ -910,26 +910,26 @@
   362    do {									\
   363      USItype __m0 = (m0), __m1 = (m1);					\
   364      __asm__ (								\
   365 -       "s	r2,r2
   366 -	mts	r10,%2
   367 -	m	r2,%3
   368 -	m	r2,%3
   369 -	m	r2,%3
   370 -	m	r2,%3
   371 -	m	r2,%3
   372 -	m	r2,%3
   373 -	m	r2,%3
   374 -	m	r2,%3
   375 -	m	r2,%3
   376 -	m	r2,%3
   377 -	m	r2,%3
   378 -	m	r2,%3
   379 -	m	r2,%3
   380 -	m	r2,%3
   381 -	m	r2,%3
   382 -	m	r2,%3
   383 -	cas	%0,r2,r0
   384 -	mfs	r10,%1"							\
   385 +       "s	r2,r2\n"						\
   386 +	"mts	r10,%2\n"						\
   387 +	"m	r2,%3\n"						\
   388 +	"m	r2,%3\n"						\
   389 +	"m	r2,%3\n"						\
   390 +	"m	r2,%3\n"						\
   391 +	"m	r2,%3\n"						\
   392 +	"m	r2,%3\n"						\
   393 +	"m	r2,%3\n"						\
   394 +	"m	r2,%3\n"						\
   395 +	"m	r2,%3\n"						\
   396 +	"m	r2,%3\n"						\
   397 +	"m	r2,%3\n"						\
   398 +	"m	r2,%3\n"						\
   399 +	"m	r2,%3\n"						\
   400 +	"m	r2,%3\n"						\
   401 +	"m	r2,%3\n"						\
   402 +	"m	r2,%3\n"						\
   403 +	"cas	%0,r2,r0\n"						\
   404 +	"mfs	r10,%1"							\
   405  	     : "=r" ((USItype)(ph)),					\
   406  	       "=r" ((USItype)(pl))					\
   407  	     : "%r" (__m0),						\
   408 @@ -959,9 +959,9 @@
   409  #if defined (__sh2__) && W_TYPE_SIZE == 32
   410  #define umul_ppmm(w1, w0, u, v) \
   411    __asm__ (								\
   412 -       "dmulu.l	%2,%3
   413 -	sts	macl,%1
   414 -	sts	mach,%0"						\
   415 +       "dmulu.l	%2,%3\n"						\
   416 +	"sts	macl,%1\n"						\
   417 +	"sts	mach,%0"						\
   418  	   : "=r" ((USItype)(w1)),					\
   419  	     "=r" ((USItype)(w0))					\
   420  	   : "r" ((USItype)(u)),					\
   421 @@ -972,8 +972,8 @@
   422  
   423  #if defined (__sparc__) && W_TYPE_SIZE == 32
   424  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   425 -  __asm__ ("addcc %r4,%5,%1
   426 -	addx %r2,%3,%0"							\
   427 +  __asm__ ("addcc %r4,%5,%1\n"						\
   428 +	"addx %r2,%3,%0"							\
   429  	   : "=r" ((USItype)(sh)),					\
   430  	     "=&r" ((USItype)(sl))					\
   431  	   : "%rJ" ((USItype)(ah)),					\
   432 @@ -982,8 +982,8 @@
   433  	     "rI" ((USItype)(bl))					\
   434  	   __CLOBBER_CC)
   435  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   436 -  __asm__ ("subcc %r4,%5,%1
   437 -	subx %r2,%3,%0"							\
   438 +  __asm__ ("subcc %r4,%5,%1\n"						\
   439 +	"subx %r2,%3,%0"							\
   440  	   : "=r" ((USItype)(sh)),					\
   441  	     "=&r" ((USItype)(sl))					\
   442  	   : "rJ" ((USItype)(ah)),					\
   443 @@ -1029,45 +1029,45 @@
   444  	     "r" ((USItype)(v)))
   445  #define UMUL_TIME 5
   446  #define udiv_qrnnd(q, r, n1, n0, d) \
   447 -  __asm__ ("! Inlined udiv_qrnnd
   448 -	wr	%%g0,%2,%%y	! Not a delayed write for sparclite
   449 -	tst	%%g0
   450 -	divscc	%3,%4,%%g1
   451 -	divscc	%%g1,%4,%%g1
   452 -	divscc	%%g1,%4,%%g1
   453 -	divscc	%%g1,%4,%%g1
   454 -	divscc	%%g1,%4,%%g1
   455 -	divscc	%%g1,%4,%%g1
   456 -	divscc	%%g1,%4,%%g1
   457 -	divscc	%%g1,%4,%%g1
   458 -	divscc	%%g1,%4,%%g1
   459 -	divscc	%%g1,%4,%%g1
   460 -	divscc	%%g1,%4,%%g1
   461 -	divscc	%%g1,%4,%%g1
   462 -	divscc	%%g1,%4,%%g1
   463 -	divscc	%%g1,%4,%%g1
   464 -	divscc	%%g1,%4,%%g1
   465 -	divscc	%%g1,%4,%%g1
   466 -	divscc	%%g1,%4,%%g1
   467 -	divscc	%%g1,%4,%%g1
   468 -	divscc	%%g1,%4,%%g1
   469 -	divscc	%%g1,%4,%%g1
   470 -	divscc	%%g1,%4,%%g1
   471 -	divscc	%%g1,%4,%%g1
   472 -	divscc	%%g1,%4,%%g1
   473 -	divscc	%%g1,%4,%%g1
   474 -	divscc	%%g1,%4,%%g1
   475 -	divscc	%%g1,%4,%%g1
   476 -	divscc	%%g1,%4,%%g1
   477 -	divscc	%%g1,%4,%%g1
   478 -	divscc	%%g1,%4,%%g1
   479 -	divscc	%%g1,%4,%%g1
   480 -	divscc	%%g1,%4,%%g1
   481 -	divscc	%%g1,%4,%0
   482 -	rd	%%y,%1
   483 -	bl,a 1f
   484 -	add	%1,%4,%1
   485 -1:	! End of inline udiv_qrnnd"					\
   486 +  __asm__ ("! Inlined udiv_qrnnd\n"					\
   487 +	"wr	%%g0,%2,%%y	! Not a delayed write for sparclite\n"	\
   488 +	"tst	%%g0\n"							\
   489 +	"divscc	%3,%4,%%g1\n"						\
   490 +	"divscc	%%g1,%4,%%g1\n"						\
   491 +	"divscc	%%g1,%4,%%g1\n"						\
   492 +	"divscc	%%g1,%4,%%g1\n"						\
   493 +	"divscc	%%g1,%4,%%g1\n"						\
   494 +	"divscc	%%g1,%4,%%g1\n"						\
   495 +	"divscc	%%g1,%4,%%g1\n"						\
   496 +	"divscc	%%g1,%4,%%g1\n"						\
   497 +	"divscc	%%g1,%4,%%g1\n"						\
   498 +	"divscc	%%g1,%4,%%g1\n"						\
   499 +	"divscc	%%g1,%4,%%g1\n"						\
   500 +	"divscc	%%g1,%4,%%g1\n"						\
   501 +	"divscc	%%g1,%4,%%g1\n"						\
   502 +	"divscc	%%g1,%4,%%g1\n"						\
   503 +	"divscc	%%g1,%4,%%g1\n"						\
   504 +	"divscc	%%g1,%4,%%g1\n"						\
   505 +	"divscc	%%g1,%4,%%g1\n"						\
   506 +	"divscc	%%g1,%4,%%g1\n"						\
   507 +	"divscc	%%g1,%4,%%g1\n"						\
   508 +	"divscc	%%g1,%4,%%g1\n"						\
   509 +	"divscc	%%g1,%4,%%g1\n"						\
   510 +	"divscc	%%g1,%4,%%g1\n"						\
   511 +	"divscc	%%g1,%4,%%g1\n"						\
   512 +	"divscc	%%g1,%4,%%g1\n"						\
   513 +	"divscc	%%g1,%4,%%g1\n"						\
   514 +	"divscc	%%g1,%4,%%g1\n"						\
   515 +	"divscc	%%g1,%4,%%g1\n"						\
   516 +	"divscc	%%g1,%4,%%g1\n"						\
   517 +	"divscc	%%g1,%4,%%g1\n"						\
   518 +	"divscc	%%g1,%4,%%g1\n"						\
   519 +	"divscc	%%g1,%4,%%g1\n"						\
   520 +	"divscc	%%g1,%4,%0\n"						\
   521 +	"rd	%%y,%1\n"						\
   522 +	"bl,a 1f\n"							\
   523 +	"add	%1,%4,%1\n"						\
   524 +"1:	! End of inline udiv_qrnnd"					\
   525  	   : "=r" ((USItype)(q)),					\
   526  	     "=r" ((USItype)(r))					\
   527  	   : "r" ((USItype)(n1)),					\
   528 @@ -1087,46 +1087,46 @@
   529  /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
   530  #ifndef umul_ppmm
   531  #define umul_ppmm(w1, w0, u, v) \
   532 -  __asm__ ("! Inlined umul_ppmm
   533 -	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
   534 -	sra	%3,31,%%g2	! Don't move this insn
   535 -	and	%2,%%g2,%%g2	! Don't move this insn
   536 -	andcc	%%g0,0,%%g1	! Don't move this insn
   537 -	mulscc	%%g1,%3,%%g1
   538 -	mulscc	%%g1,%3,%%g1
   539 -	mulscc	%%g1,%3,%%g1
   540 -	mulscc	%%g1,%3,%%g1
   541 -	mulscc	%%g1,%3,%%g1
   542 -	mulscc	%%g1,%3,%%g1
   543 -	mulscc	%%g1,%3,%%g1
   544 -	mulscc	%%g1,%3,%%g1
   545 -	mulscc	%%g1,%3,%%g1
   546 -	mulscc	%%g1,%3,%%g1
   547 -	mulscc	%%g1,%3,%%g1
   548 -	mulscc	%%g1,%3,%%g1
   549 -	mulscc	%%g1,%3,%%g1
   550 -	mulscc	%%g1,%3,%%g1
   551 -	mulscc	%%g1,%3,%%g1
   552 -	mulscc	%%g1,%3,%%g1
   553 -	mulscc	%%g1,%3,%%g1
   554 -	mulscc	%%g1,%3,%%g1
   555 -	mulscc	%%g1,%3,%%g1
   556 -	mulscc	%%g1,%3,%%g1
   557 -	mulscc	%%g1,%3,%%g1
   558 -	mulscc	%%g1,%3,%%g1
   559 -	mulscc	%%g1,%3,%%g1
   560 -	mulscc	%%g1,%3,%%g1
   561 -	mulscc	%%g1,%3,%%g1
   562 -	mulscc	%%g1,%3,%%g1
   563 -	mulscc	%%g1,%3,%%g1
   564 -	mulscc	%%g1,%3,%%g1
   565 -	mulscc	%%g1,%3,%%g1
   566 -	mulscc	%%g1,%3,%%g1
   567 -	mulscc	%%g1,%3,%%g1
   568 -	mulscc	%%g1,%3,%%g1
   569 -	mulscc	%%g1,0,%%g1
   570 -	add	%%g1,%%g2,%0
   571 -	rd	%%y,%1"							\
   572 +  __asm__ ("! Inlined umul_ppmm\n"					\
   573 +	"wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr\n" \
   574 +	"sra	%3,31,%%g2	! Don't move this insn\n"		\
   575 +	"and	%2,%%g2,%%g2	! Don't move this insn\n"		\
   576 +	"andcc	%%g0,0,%%g1	! Don't move this insn\n"		\
   577 +	"mulscc	%%g1,%3,%%g1\n"						\
   578 +	"mulscc	%%g1,%3,%%g1\n"						\
   579 +	"mulscc	%%g1,%3,%%g1\n"						\
   580 +	"mulscc	%%g1,%3,%%g1\n"						\
   581 +	"mulscc	%%g1,%3,%%g1\n"						\
   582 +	"mulscc	%%g1,%3,%%g1\n"						\
   583 +	"mulscc	%%g1,%3,%%g1\n"						\
   584 +	"mulscc	%%g1,%3,%%g1\n"						\
   585 +	"mulscc	%%g1,%3,%%g1\n"						\
   586 +	"mulscc	%%g1,%3,%%g1\n"						\
   587 +	"mulscc	%%g1,%3,%%g1\n"						\
   588 +	"mulscc	%%g1,%3,%%g1\n"						\
   589 +	"mulscc	%%g1,%3,%%g1\n"						\
   590 +	"mulscc	%%g1,%3,%%g1\n"						\
   591 +	"mulscc	%%g1,%3,%%g1\n"						\
   592 +	"mulscc	%%g1,%3,%%g1\n"						\
   593 +	"mulscc	%%g1,%3,%%g1\n"						\
   594 +	"mulscc	%%g1,%3,%%g1\n"						\
   595 +	"mulscc	%%g1,%3,%%g1\n"						\
   596 +	"mulscc	%%g1,%3,%%g1\n"						\
   597 +	"mulscc	%%g1,%3,%%g1\n"						\
   598 +	"mulscc	%%g1,%3,%%g1\n"						\
   599 +	"mulscc	%%g1,%3,%%g1\n"						\
   600 +	"mulscc	%%g1,%3,%%g1\n"						\
   601 +	"mulscc	%%g1,%3,%%g1\n"						\
   602 +	"mulscc	%%g1,%3,%%g1\n"						\
   603 +	"mulscc	%%g1,%3,%%g1\n"						\
   604 +	"mulscc	%%g1,%3,%%g1\n"						\
   605 +	"mulscc	%%g1,%3,%%g1\n"						\
   606 +	"mulscc	%%g1,%3,%%g1\n"						\
   607 +	"mulscc	%%g1,%3,%%g1\n"						\
   608 +	"mulscc	%%g1,%3,%%g1\n"						\
   609 +	"mulscc	%%g1,0,%%g1\n"						\
   610 +	"add	%%g1,%%g2,%0\n"						\
   611 +	"rd	%%y,%1"							\
   612  	   : "=r" ((USItype)(w1)),					\
   613  	     "=r" ((USItype)(w0))					\
   614  	   : "%rI" ((USItype)(u)),					\
   615 @@ -1138,30 +1138,30 @@
   616  /* It's quite necessary to add this much assembler for the sparc.
   617     The default udiv_qrnnd (in C) is more than 10 times slower!  */
   618  #define udiv_qrnnd(q, r, n1, n0, d) \
   619 -  __asm__ ("! Inlined udiv_qrnnd
   620 -	mov	32,%%g1
   621 -	subcc	%1,%2,%%g0
   622 -1:	bcs	5f
   623 -	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
   624 -	sub	%1,%2,%1	! this kills msb of n
   625 -	addx	%1,%1,%1	! so this can't give carry
   626 -	subcc	%%g1,1,%%g1
   627 -2:	bne	1b
   628 -	 subcc	%1,%2,%%g0
   629 -	bcs	3f
   630 -	 addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb
   631 -	b	3f
   632 -	 sub	%1,%2,%1	! this kills msb of n
   633 -4:	sub	%1,%2,%1
   634 -5:	addxcc	%1,%1,%1
   635 -	bcc	2b
   636 -	 subcc	%%g1,1,%%g1
   637 -! Got carry from n.  Subtract next step to cancel this carry.
   638 -	bne	4b
   639 -	 addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb
   640 -	sub	%1,%2,%1
   641 -3:	xnor	%0,0,%0
   642 -	! End of inline udiv_qrnnd"					\
   643 +  __asm__ ("! Inlined udiv_qrnnd\n"					\
   644 +	"mov	32,%%g1\n"						\
   645 +	"subcc	%1,%2,%%g0\n"						\
   646 +"1:	bcs	5f\n"							\
   647 +	"addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
   648 +	"sub	%1,%2,%1	! this kills msb of n\n"		\
   649 +	"addx	%1,%1,%1	! so this can't give carry\n"		\
   650 +	"subcc	%%g1,1,%%g1\n"						\
   651 +"2:	bne	1b\n"							\
   652 +	"subcc	%1,%2,%%g0\n"						\
   653 +	"bcs	3f\n"							\
   654 +	"addxcc %0,%0,%0	! shift n1n0 and a q-bit in lsb\n"	\
   655 +	"b	3f\n"							\
   656 +	"sub	%1,%2,%1	! this kills msb of n\n"		\
   657 +"4:	sub	%1,%2,%1\n"						\
   658 +"5:	addxcc	%1,%1,%1\n"						\
   659 +	"bcc	2b\n"							\
   660 +	"subcc	%%g1,1,%%g1\n"						\
   661 +"! Got carry from n.  Subtract next step to cancel this carry.\n"	\
   662 +	"bne	4b\n"							\
   663 +	"addcc	%0,%0,%0	! shift n1n0 and a 0-bit in lsb\n"	\
   664 +	"sub	%1,%2,%1\n"						\
   665 +"3:	xnor	%0,0,%0\n"						\
   666 +	"! End of inline udiv_qrnnd"					\
   667  	   : "=&r" ((USItype)(q)),					\
   668  	     "=&r" ((USItype)(r))					\
   669  	   : "r" ((USItype)(d)),					\
   670 @@ -1179,11 +1179,11 @@
   671  #if (defined (__sparc_v9__) || (defined (__sparc__) && defined (__arch64__)) \
   672      || defined (__sparcv9)) && W_TYPE_SIZE == 64
   673  #define add_ssaaaa(sh, sl, ah, al, bh, bl)				\
   674 -  __asm__ ("addcc %r4,%5,%1
   675 -  	    add %r2,%3,%0
   676 -  	    bcs,a,pn %%xcc, 1f
   677 -  	    add %0, 1, %0
   678 -  	    1:"								\
   679 +  __asm__ ("addcc %r4,%5,%1\n"						\
   680 +  	    "add %r2,%3,%0\n"						\
   681 +  	    "bcs,a,pn %%xcc, 1f\n"					\
   682 +  	    "add %0, 1, %0\n"						\
   683 +  	    "1:"							\
   684  	   : "=r" ((UDItype)(sh)),				      	\
   685  	     "=&r" ((UDItype)(sl))				      	\
   686  	   : "r" ((UDItype)(ah)),				     	\
   687 @@ -1193,11 +1193,11 @@
   688  	   : "cc")
   689  
   690  #define sub_ddmmss(sh, sl, ah, al, bh, bl) 				\
   691 -  __asm__ ("subcc %r4,%5,%1
   692 -  	    sub %r2,%3,%0
   693 -  	    bcs,a,pn %%xcc, 1f
   694 -  	    sub %0, 1, %0
   695 -  	    1:"								\
   696 +  __asm__ ("subcc %r4,%5,%1\n"						\
   697 +  	    "sub %r2,%3,%0\n"						\
   698 +  	    "bcs,a,pn %%xcc, 1f\n"					\
   699 +  	    "sub %0, 1, %0\n"						\
   700 +  	    "1:"							\
   701  	   : "=r" ((UDItype)(sh)),				      	\
   702  	     "=&r" ((UDItype)(sl))				      	\
   703  	   : "r" ((UDItype)(ah)),				     	\
   704 @@ -1210,27 +1210,27 @@
   705    do {									\
   706  	  UDItype tmp1, tmp2, tmp3, tmp4;				\
   707  	  __asm__ __volatile__ (					\
   708 -		   "srl %7,0,%3
   709 -		    mulx %3,%6,%1
   710 -		    srlx %6,32,%2
   711 -		    mulx %2,%3,%4
   712 -		    sllx %4,32,%5
   713 -		    srl %6,0,%3
   714 -		    sub %1,%5,%5
   715 -		    srlx %5,32,%5
   716 -		    addcc %4,%5,%4
   717 -		    srlx %7,32,%5
   718 -		    mulx %3,%5,%3
   719 -		    mulx %2,%5,%5
   720 -		    sethi %%hi(0x80000000),%2
   721 -		    addcc %4,%3,%4
   722 -		    srlx %4,32,%4
   723 -		    add %2,%2,%2
   724 -		    movcc %%xcc,%%g0,%2
   725 -		    addcc %5,%4,%5
   726 -		    sllx %3,32,%3
   727 -		    add %1,%3,%1
   728 -		    add %5,%2,%0"					\
   729 +		   "srl %7,0,%3\n"					\
   730 +		    "mulx %3,%6,%1\n"					\
   731 +		    "srlx %6,32,%2\n"					\
   732 +		    "mulx %2,%3,%4\n"					\
   733 +		    "sllx %4,32,%5\n"					\
   734 +		    "srl %6,0,%3\n"					\
   735 +		    "sub %1,%5,%5\n"					\
   736 +		    "srlx %5,32,%5\n"					\
   737 +		    "addcc %4,%5,%4\n"					\
   738 +		    "srlx %7,32,%5\n"					\
   739 +		    "mulx %3,%5,%3\n"					\
   740 +		    "mulx %2,%5,%5\n"					\
   741 +		    "sethi %%hi(0x80000000),%2\n"			\
   742 +		    "addcc %4,%3,%4\n"					\
   743 +		    "srlx %4,32,%4\n"					\
   744 +		    "add %2,%2,%2\n"					\
   745 +		    "movcc %%xcc,%%g0,%2\n"				\
   746 +		    "addcc %5,%4,%5\n"					\
   747 +		    "sllx %3,32,%3\n"					\
   748 +		    "add %1,%3,%1\n"					\
   749 +		    "add %5,%2,%0"					\
   750  	   : "=r" ((UDItype)(wh)),					\
   751  	     "=&r" ((UDItype)(wl)),					\
   752  	     "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4)	\
   753 @@ -1244,8 +1244,8 @@
   754  
   755  #if defined (__vax__) && W_TYPE_SIZE == 32
   756  #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
   757 -  __asm__ ("addl2 %5,%1
   758 -	adwc %3,%0"							\
   759 +  __asm__ ("addl2 %5,%1\n"						\
   760 +	"adwc %3,%0"							\
   761  	   : "=g" ((USItype)(sh)),					\
   762  	     "=&g" ((USItype)(sl))					\
   763  	   : "%0" ((USItype)(ah)),					\
   764 @@ -1253,8 +1253,8 @@
   765  	     "%1" ((USItype)(al)),					\
   766  	     "g" ((USItype)(bl)))
   767  #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
   768 -  __asm__ ("subl2 %5,%1
   769 -	sbwc %3,%0"							\
   770 +  __asm__ ("subl2 %5,%1\n"						\
   771 +	"sbwc %3,%0"							\
   772  	   : "=g" ((USItype)(sh)),					\
   773  	     "=&g" ((USItype)(sl))					\
   774  	   : "0" ((USItype)(ah)),					\