#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")
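
/*
 * Illustrative sketch (not part of the original header): barrier() only
 * constrains the compiler, not the CPU.  In the hypothetical publish()
 * below it keeps the compiler from reordering or eliding the two stores,
 * but another processor may still observe them out of order; the smp_*()
 * barriers further down are what provide cross-CPU ordering.
 *
 *     extern int data, flag;
 *
 *     void publish(int v)
 *     {
 *         data = v;
 *         barrier();   // the compiler may not move the stores across this
 *         flag = 1;
 *     }
 */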
#if defined(__i386__)

/*
 * Because of the strongly ordered x86 storage model, smp_wmb() and
 * smp_rmb() are just compiler barriers (nops) on x86, at least as long
 * as qemu doesn't do accesses to write-combining memory or non-temporal
 * load/stores from C code.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
/*
 * We use the GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as 4.3.
 */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#endif
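
/*
 * Illustrative note (not from the original source; the variable names are
 * made up): even though smp_wmb()/smp_rmb() are nops here, a real fence is
 * still needed for smp_mb() because x86 allows a store to be reordered
 * after a later load (store buffering).  Dekker-style flag signalling
 * therefore needs smp_mb() between the store and the load on both CPUs:
 *
 *     // CPU 0                 // CPU 1
 *     flag0 = 1;               flag1 = 1;
 *     smp_mb();                smp_mb();
 *     r0 = flag1;              r1 = flag0;
 *
 * Without the barriers both r0 and r1 can end up 0.
 */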

#elif defined(__x86_64__)

#define smp_wmb() barrier()
#define smp_rmb() barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use the eieio instruction for smp_wmb() on powerpc.  This assumes
 * we don't need to order cacheable and non-cacheable stores with
 * respect to each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
#define smp_rmb() asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb() asm volatile("sync" ::: "memory")
#endif

#define smp_mb() asm volatile("sync" ::: "memory")

#else

/*
 * For (host) platforms for which we don't have explicit barrier
 * definitions, we use the gcc __sync_synchronize() primitive to
 * generate a full barrier.  This should be safe on all platforms,
 * though it may be overkill for smp_wmb() and smp_rmb().
 */
#define smp_wmb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()

#endif

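/*
 * Typical usage sketch (illustrative only, not from the original source):
 * a single-producer/single-consumer ring where smp_wmb() publishes the
 * payload before the index and smp_rmb() orders the index read before the
 * payload read.  The names (ring, prod, cons, N, item) are made up for
 * the example.
 *
 *     // producer
 *     ring[prod % N] = item;
 *     smp_wmb();              // make the payload visible first
 *     prod = prod + 1;        // then publish the new index
 *
 *     // consumer
 *     while (cons == prod)
 *         ;                   // nothing to read yet
 *     smp_rmb();              // read the index before reading the payload
 *     item = ring[cons % N];
 *     cons = cons + 1;
 */
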
#endif /* __QEMU_BARRIER_H */