For consistency, move the muls64 / mulu64 prototypes to host-utils.h.
Make the x86_64-optimized versions inline.


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3523 c046a42c-6fe2-441c-8c8c-71466251a162
diff --git a/host-utils.c b/host-utils.c
index a3c838f..7cd0843 100644
--- a/host-utils.c
+++ b/host-utils.c
@@ -28,6 +28,7 @@
 //#define DEBUG_MULDIV
 
 /* Long integer helpers */
+#if !defined(__x86_64__)
 static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
 {
     *plow += a;
@@ -69,17 +70,10 @@
     *phigh += v;
 }
 
-
 /* Unsigned 64x64 -> 128 multiplication */
 void mulu64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
 {
-#if defined(__x86_64__)
-    __asm__ ("mul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
-#else
     mul64(plow, phigh, a, b);
-#endif
 #if defined(DEBUG_MULDIV)
     printf("mulu64: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
            a, b, *phigh, *plow);
@@ -89,11 +83,6 @@
 /* Signed 64x64 -> 128 multiplication */
 void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
 {
-#if defined(__x86_64__)
-    __asm__ ("imul %0\n\t"
-             : "=d" (*phigh), "=a" (*plow)
-             : "a" (a), "0" (b));
-#else
     int sa, sb;
 
     sa = (a < 0);
@@ -106,9 +95,9 @@
     if (sa ^ sb) {
         neg128(plow, phigh);
     }
-#endif
 #if defined(DEBUG_MULDIV)
     printf("muls64: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
            a, b, *phigh, *plow);
 #endif
 }
+#endif /* !defined(__x86_64__) */
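
The host-utils.h half of this change is not shown in the diff above. For
reference, a minimal sketch of what the x86_64 inline versions in the header
could look like, reusing the asm removed from host-utils.c; the `static inline`
spelling and the exact declaration order are assumptions here, not the
committed header text:

/* host-utils.h (sketch, not the committed header) */
#include <stdint.h>

#if defined(__x86_64__)
/* On x86_64 a single mul/imul leaves the full 128-bit product in
   rdx:rax, so the helpers can live in the header as inline functions
   instead of out-of-line calls. */
static inline void mulu64(uint64_t *plow, uint64_t *phigh,
                          uint64_t a, uint64_t b)
{
    /* %0 is *phigh (rdx): "mul %rdx" computes rax * rdx -> rdx:rax. */
    __asm__ ("mul %0\n\t"
             : "=d" (*phigh), "=a" (*plow)
             : "a" (a), "0" (b));
}

static inline void muls64(uint64_t *plow, uint64_t *phigh,
                          int64_t a, int64_t b)
{
    /* Same register layout as above, signed variant. */
    __asm__ ("imul %0\n\t"
             : "=d" (*phigh), "=a" (*plow)
             : "a" (a), "0" (b));
}
#else
/* Portable versions stay in host-utils.c, guarded by the
   !defined(__x86_64__) block added above; the header only declares them. */
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b);
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b);
#endif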