AIX 4.3.2 warnings with 1.0.7

Albert Chin gnupg-devel at thewrittenword.com
Fri Jun 7 16:00:02 CEST 2002


On Fri, Jun 07, 2002 at 07:34:41AM -0500, Albert Chin wrote:
> AIX 4.3.2 with the IBM C compiler gives the following warnings on
> mpi/longlong.h. The patch below fixes it.
> 
> $ gmake
> ...
> gmake[2]: Entering directory `/opt/build/gnupg-1.0.7/mpi'
> xlc -DHAVE_CONFIG_H -I. -I. -I.. -I.. -I../include
> -I/opt/TWWfsw/zlib11s/include  -O2 -qmaxmem=-1 -qarch=com  -c `test -f
> mpih-sub1.c || echo './'`mpih-sub1.c
> "longlong.h", line 619.38: 1506-186 (W) String literal must be ended
> before the end of line.
> "longlong.h", line 644.14: 1506-186 (W) String literal must be ended
> before the end of line.
> "longlong.h", line 1112.33: 1506-186 (W) String literal must be ended
> before the end of line.
> "longlong.h", line 1156.37: 1506-186 (W) String literal must be ended
> before the end of line.
> ...

Ok, that was a bad patch. Try this one instead. Sorry.
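
For reference, the warnings come from the asm templates in longlong.h
spreading a single string literal across several source lines with raw
newlines, an old GNU C extension that ISO C forbids and that xlc
accepts only with warning 1506-186. The patch ends each such line with
"\n\": the \n supplies the newline the assembler needs, and the
trailing backslash splices the physical source lines so the string
literal (and the surrounding #define) continue cleanly. A minimal
stand-alone sketch of the same rewrite, in plain C with no asm (the
template text is copied from the i386 add_ssaaaa in the patch below,
purely for illustration):

  #include <stdio.h>

  /* One string literal spread over two source lines the way the patch
     rewrites the asm templates: \n embeds the newline, and the
     trailing backslash splices the lines, so the literal stays well
     formed even without the GNU newline-in-string extension.  */
  static const char tmpl[] = "addl %5,%1                            \n\
          adcl %3,%0";

  int main (void)
  {
    fputs (tmpl, stdout);
    putchar ('\n');
    return 0;
  }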

-- 
albert chin (china at thewrittenword.com)

--- snip snip
--- mpi/longlong.h.orig	Fri Jun  7 07:18:26 2002
+++ mpi/longlong.h	Fri Jun  7 07:46:48 2002
@@ -115,7 +115,7 @@
  ***************************************/
 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("add %1,%4,%5
+  __asm__ ("add %1,%4,%5                                              \n\
 	addc %0,%2,%3"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	    "=&r" ((USItype)(sl))                                       \
@@ -124,7 +124,7 @@
 	     "%r" ((USItype)(al)),                                      \
 	     "rI" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("sub %1,%4,%5
+  __asm__ ("sub %1,%4,%5                                              \n\
 	subc %0,%2,%3"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -186,7 +186,7 @@
  ***************************************/
 #if defined (__arm__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("adds        %1, %4, %5
+  __asm__ ("adds        %1, %4, %5                                    \n\
 	adc	%0, %2, %3"                                             \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -195,7 +195,7 @@
 	     "%r" ((USItype)(al)),                                      \
 	     "rI" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subs        %1, %4, %5
+  __asm__ ("subs        %1, %4, %5                                    \n\
 	sbc	%0, %2, %3"                                             \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -205,18 +205,18 @@
 	     "rI" ((USItype)(bl)))
 #if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__
 #define umul_ppmm(xh, xl, a, b) \
-  __asm__ ("%@ Inlined umul_ppmm
-	mov	%|r0, %2, lsr #16		@ AAAA
-	mov	%|r2, %3, lsr #16		@ BBBB
-	bic	%|r1, %2, %|r0, lsl #16		@ aaaa
-	bic	%0, %3, %|r2, lsl #16		@ bbbb
-	mul	%1, %|r1, %|r2			@ aaaa * BBBB
-	mul	%|r2, %|r0, %|r2		@ AAAA * BBBB
-	mul	%|r1, %0, %|r1			@ aaaa * bbbb
-	mul	%0, %|r0, %0			@ AAAA * bbbb
-	adds	%|r0, %1, %0			@ central sum
-	addcs	%|r2, %|r2, #65536
-	adds	%1, %|r1, %|r0, lsl #16
+  __asm__ ("%@ Inlined umul_ppmm                                      \n\
+	mov	%|r0, %2, lsr #16		@ AAAA                \n\
+	mov	%|r2, %3, lsr #16		@ BBBB                \n\
+	bic	%|r1, %2, %|r0, lsl #16		@ aaaa                \n\
+	bic	%0, %3, %|r2, lsl #16		@ bbbb                \n\
+	mul	%1, %|r1, %|r2			@ aaaa * BBBB         \n\
+	mul	%|r2, %|r0, %|r2		@ AAAA * BBBB         \n\
+	mul	%|r1, %0, %|r1			@ aaaa * bbbb         \n\
+	mul	%0, %|r0, %0			@ AAAA * bbbb         \n\
+	adds	%|r0, %1, %0			@ central sum         \n\
+	addcs	%|r2, %|r2, #65536                                    \n\
+	adds	%1, %|r1, %|r0, lsl #16                               \n\
 	adc	%0, %|r2, %|r0, lsr #16"                                \
 	   : "=&r" ((USItype)(xh)),                                     \
 	     "=r" ((USItype)(xl))                                       \
@@ -225,7 +225,7 @@
 	   : "r0", "r1", "r2")
 #else
 #define umul_ppmm(xh, xl, a, b) \
-  __asm__ ("%@ Inlined umul_ppmm
+  __asm__ ("%@ Inlined umul_ppmm \n\
 	umull	%r1, %r0, %r2, %r3" \
 		   : "=&r" ((USItype)(xh)), \
 		     "=r" ((USItype)(xl)) \
@@ -274,7 +274,7 @@
  ***************************************/
 #if defined (__gmicro__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("add.w %5,%1
+  __asm__ ("add.w %5,%1                                               \n\
 	addx %3,%0"                                                     \
 	   : "=g" ((USItype)(sh)),                                      \
 	     "=&g" ((USItype)(sl))                                      \
@@ -283,7 +283,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "g" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("sub.w %5,%1
+  __asm__ ("sub.w %5,%1                                               \n\
 	subx %3,%0"                                                     \
 	   : "=g" ((USItype)(sh)),                                      \
 	     "=&g" ((USItype)(sl))                                      \
@@ -317,7 +317,7 @@
  ***************************************/
 #if defined (__hppa) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("add %4,%5,%1
+  __asm__ ("add %4,%5,%1                                              \n\
 	addc %2,%3,%0"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -326,7 +326,7 @@
 	     "%rM" ((USItype)(al)),                                     \
 	     "rM" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("sub %4,%5,%1
+  __asm__ ("sub %4,%5,%1                                              \n\
 	subb %2,%3,%0"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -365,21 +365,21 @@
   do {									\
     USItype __tmp;							\
     __asm__ (								\
-       "ldi             1,%0
-	extru,= 	%1,15,16,%%r0		; Bits 31..16 zero?
-	extru,tr	%1,15,16,%1		; No.  Shift down, skip add.
-	ldo		16(%0),%0		; Yes.	Perform add.
-	extru,= 	%1,23,8,%%r0		; Bits 15..8 zero?
-	extru,tr	%1,23,8,%1		; No.  Shift down, skip add.
-	ldo		8(%0),%0		; Yes.	Perform add.
-	extru,= 	%1,27,4,%%r0		; Bits 7..4 zero?
-	extru,tr	%1,27,4,%1		; No.  Shift down, skip add.
-	ldo		4(%0),%0		; Yes.	Perform add.
-	extru,= 	%1,29,2,%%r0		; Bits 3..2 zero?
-	extru,tr	%1,29,2,%1		; No.  Shift down, skip add.
-	ldo		2(%0),%0		; Yes.	Perform add.
-	extru		%1,30,1,%1		; Extract bit 1.
-	sub		%0,%1,%0		; Subtract it.
+       "ldi             1,%0                                          \n\
+	extru,= 	%1,15,16,%%r0		; Bits 31..16 zero?   \n\
+	extru,tr	%1,15,16,%1		; No.  Shift down, skip add. \n\
+	ldo		16(%0),%0		; Yes.	Perform add.  \n\
+	extru,= 	%1,23,8,%%r0		; Bits 15..8 zero?    \n\
+	extru,tr	%1,23,8,%1		; No.  Shift down, skip add. \n\
+	ldo		8(%0),%0		; Yes.	Perform add.  \n\
+	extru,= 	%1,27,4,%%r0		; Bits 7..4 zero?     \n\
+	extru,tr	%1,27,4,%1		; No.  Shift down, skip add. \n\
+	ldo		4(%0),%0		; Yes.	Perform add.  \n\
+	extru,= 	%1,29,2,%%r0		; Bits 3..2 zero?     \n\
+	extru,tr	%1,29,2,%1		; No.  Shift down, skip add. \n\
+	ldo		2(%0),%0		; Yes.	Perform add.  \n\
+	extru		%1,30,1,%1		; Extract bit 1.      \n\
+	sub		%0,%1,%0		; Subtract it.        \n\
 	" : "=r" (count), "=r" (__tmp) : "1" (x));                      \
   } while (0)
 #endif /* hppa */
@@ -435,7 +435,7 @@
  ***************************************/
 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("addl %5,%1
+  __asm__ ("addl %5,%1                                                \n\
 	adcl %3,%0"                                                     \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -444,7 +444,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "g" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subl %5,%1
+  __asm__ ("subl %5,%1                                                \n\
 	sbbl %3,%0"                                                     \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -568,7 +568,7 @@
  ***************************************/
 #if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("add%.l %5,%1
+  __asm__ ("add%.l %5,%1                                              \n\
 	addx%.l %3,%0"                                                  \
 	   : "=d" ((USItype)(sh)),                                      \
 	     "=&d" ((USItype)(sl))                                      \
@@ -577,7 +577,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "g" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("sub%.l %5,%1
+  __asm__ ("sub%.l %5,%1                                              \n\
 	subx%.l %3,%0"                                                  \
 	   : "=d" ((USItype)(sh)),                                      \
 	     "=&d" ((USItype)(sl))                                      \
@@ -616,27 +616,27 @@
 #else /* not mc68020 */
 #define umul_ppmm(xh, xl, a, b) \
   do { USItype __umul_tmp1, __umul_tmp2;				\
-	__asm__ ("| Inlined umul_ppmm
-	move%.l %5,%3
-	move%.l %2,%0
-	move%.w %3,%1
-	swap	%3
-	swap	%0
-	mulu	%2,%1
-	mulu	%3,%0
-	mulu	%2,%3
-	swap	%2
-	mulu	%5,%2
-	add%.l	%3,%2
-	jcc	1f
-	add%.l	%#0x10000,%0
-1:	move%.l %2,%3
-	clr%.w	%2
-	swap	%2
-	swap	%3
-	clr%.w	%3
-	add%.l	%3,%1
-	addx%.l %2,%0
+	__asm__ ("| Inlined umul_ppmm                                 \n\
+	move%.l %5,%3                                                 \n\
+	move%.l %2,%0                                                 \n\
+	move%.w %3,%1                                                 \n\
+	swap	%3                                                    \n\
+	swap	%0                                                    \n\
+	mulu	%2,%1                                                 \n\
+	mulu	%3,%0                                                 \n\
+	mulu	%2,%3                                                 \n\
+	swap	%2                                                    \n\
+	mulu	%5,%2                                                 \n\
+	add%.l	%3,%2                                                 \n\
+	jcc	1f                                                    \n\
+	add%.l	%#0x10000,%0                                          \n\
+1:	move%.l %2,%3                                                 \n\
+	clr%.w	%2                                                    \n\
+	swap	%2                                                    \n\
+	swap	%3                                                    \n\
+	clr%.w	%3                                                    \n\
+	add%.l	%3,%1                                                 \n\
+	addx%.l %2,%0                                                 \n\
 	| End inlined umul_ppmm"                                        \
 	      : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)),           \
 		"=d" (__umul_tmp1), "=&d" (__umul_tmp2)                 \
@@ -653,7 +653,7 @@
  ***************************************/
 #if defined (__m88000__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("addu.co %1,%r4,%r5
+  __asm__ ("addu.co %1,%r4,%r5                                        \n\
 	addu.ci %0,%r2,%r3"                                             \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -662,7 +662,7 @@
 	     "%rJ" ((USItype)(al)),                                     \
 	     "rJ" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subu.co %1,%r4,%r5
+  __asm__ ("subu.co %1,%r4,%r5                                        \n\
 	subu.ci %0,%r2,%r3"                                             \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -719,8 +719,8 @@
 	     "d" ((USItype)(v)))
 #else
 #define umul_ppmm(w1, w0, u, v) \
-  __asm__ ("multu %2,%3
-	mflo %0
+  __asm__ ("multu %2,%3                                               \n\
+	mflo %0                                                       \n\
 	mfhi %1"                                                        \
 	   : "=d" ((USItype)(w0)),                                      \
 	     "=d" ((USItype)(w1))                                       \
@@ -744,8 +744,8 @@
 	     "d" ((UDItype)(v)))
 #else
 #define umul_ppmm(w1, w0, u, v) \
-  __asm__ ("dmultu %2,%3
-	mflo %0
+  __asm__ ("dmultu %2,%3                                              \n\
+	mflo %0                                                       \n\
 	mfhi %1"                                                        \
 	   : "=d" ((UDItype)(w0)),                                      \
 	     "=d" ((UDItype)(w1))                                       \
@@ -926,7 +926,7 @@
  ***************************************/
 #if defined (__pyr__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("addw        %5,%1
+  __asm__ ("addw        %5,%1                                         \n\
 	addwc	%3,%0"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -935,7 +935,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "g" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subw        %5,%1
+  __asm__ ("subw        %5,%1                                         \n\
 	subwb	%3,%0"                                                  \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -948,7 +948,7 @@
   ({union {UDItype __ll;						\
 	   struct {USItype __h, __l;} __i;				\
 	  } __xx;							\
-  __asm__ ("movw %1,%R0
+  __asm__ ("movw %1,%R0                                               \n\
 	uemul %2,%0"                                                    \
 	   : "=&r" (__xx.__ll)                                          \
 	   : "g" ((USItype) (u)),                                       \
@@ -962,7 +962,7 @@
  ***************************************/
 #if defined (__ibm032__) /* RT/ROMP */	&& W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("a %1,%5
+  __asm__ ("a %1,%5                                                   \n\
 	ae %0,%3"                                                       \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -971,7 +971,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "r" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("s %1,%5
+  __asm__ ("s %1,%5                                                   \n\
 	se %0,%3"                                                       \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -983,25 +983,25 @@
   do {									\
     USItype __m0 = (m0), __m1 = (m1);					\
     __asm__ (								\
-       "s       r2,r2
-	mts	r10,%2
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	m	r2,%3
-	cas	%0,r2,r0
+       "s       r2,r2                                                 \n\
+	mts	r10,%2                                                \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	m	r2,%3                                                 \n\
+	cas	%0,r2,r0                                              \n\
 	mfs	r10,%1"                                                 \
 	     : "=r" ((USItype)(ph)),                                    \
 	       "=r" ((USItype)(pl))                                     \
@@ -1036,8 +1036,8 @@
 #if defined (__sh2__) && W_TYPE_SIZE == 32
 #define umul_ppmm(w1, w0, u, v) \
   __asm__ (								\
-       "dmulu.l %2,%3
-	sts	macl,%1
+       "dmulu.l %2,%3                                                 \n\
+	sts	macl,%1                                               \n\
 	sts	mach,%0"                                                \
 	   : "=r" ((USItype)(w1)),                                      \
 	     "=r" ((USItype)(w0))                                       \
@@ -1052,7 +1052,7 @@
  ***************************************/
 #if defined (__sparc__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("addcc %r4,%5,%1
+  __asm__ ("addcc %r4,%5,%1                                           \n\
 	addx %r2,%3,%0"                                                 \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -1062,7 +1062,7 @@
 	     "rI" ((USItype)(bl))                                       \
 	   __CLOBBER_CC)
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subcc %r4,%5,%1
+  __asm__ ("subcc %r4,%5,%1                                           \n\
 	subx %r2,%3,%0"                                                 \
 	   : "=r" ((USItype)(sh)),                                      \
 	     "=&r" ((USItype)(sl))                                      \
@@ -1109,44 +1109,44 @@
 	     "r" ((USItype)(v)))
 #define UMUL_TIME 5
 #define udiv_qrnnd(q, r, n1, n0, d) \
-  __asm__ ("! Inlined udiv_qrnnd
-	wr	%%g0,%2,%%y	! Not a delayed write for sparclite
-	tst	%%g0
-	divscc	%3,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%%g1
-	divscc	%%g1,%4,%0
-	rd	%%y,%1
-	bl,a 1f
-	add	%1,%4,%1
+  __asm__ ("! Inlined udiv_qrnnd                                      \n\
+	wr	%%g0,%2,%%y	! Not a delayed write for sparclite   \n\
+	tst	%%g0                                                  \n\
+	divscc	%3,%4,%%g1                                            \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%%g1                                          \n\
+	divscc	%%g1,%4,%0                                            \n\
+	rd	%%y,%1                                                \n\
+	bl,a 1f                                                       \n\
+	add	%1,%4,%1                                              \n\
 1:	! End of inline udiv_qrnnd"                                     \
 	   : "=r" ((USItype)(q)),                                       \
 	     "=r" ((USItype)(r))                                        \
@@ -1167,45 +1167,45 @@
 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
 #ifndef umul_ppmm
 #define umul_ppmm(w1, w0, u, v) \
-  __asm__ ("! Inlined umul_ppmm
-	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr
-	sra	%3,31,%%g2	! Don't move this insn
-	and	%2,%%g2,%%g2	! Don't move this insn
-	andcc	%%g0,0,%%g1	! Don't move this insn
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,%3,%%g1
-	mulscc	%%g1,0,%%g1
-	add	%%g1,%%g2,%0
+  __asm__ ("! Inlined umul_ppmm                                       \n\
+	wr	%%g0,%2,%%y	! SPARC has 0-3 delay insn after a wr \n\
+	sra	%3,31,%%g2	! Don't move this insn                \n\
+	and	%2,%%g2,%%g2	! Don't move this insn                \n\
+	andcc	%%g0,0,%%g1	! Don't move this insn                \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,%3,%%g1                                          \n\
+	mulscc	%%g1,0,%%g1                                           \n\
+	add	%%g1,%%g2,%0                                          \n\
 	rd	%%y,%1"                                                 \
 	   : "=r" ((USItype)(w1)),                                      \
 	     "=r" ((USItype)(w0))                                       \
@@ -1233,7 +1233,7 @@
  ***************************************/
 #if defined (__vax__) && W_TYPE_SIZE == 32
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
-  __asm__ ("addl2 %5,%1
+  __asm__ ("addl2 %5,%1                                               \n\
 	adwc %3,%0"                                                     \
 	   : "=g" ((USItype)(sh)),                                      \
 	     "=&g" ((USItype)(sl))                                      \
@@ -1242,7 +1242,7 @@
 	     "%1" ((USItype)(al)),                                      \
 	     "g" ((USItype)(bl)))
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
-  __asm__ ("subl2 %5,%1
+  __asm__ ("subl2 %5,%1                                               \n\
 	sbwc %3,%0"                                                     \
 	   : "=g" ((USItype)(sh)),                                      \
 	     "=&g" ((USItype)(sl))                                      \
