#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier()   asm volatile("" ::: "memory")

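/*
 * Illustrative sketch (not part of the original header): barrier()
 * only constrains the compiler and emits no CPU instruction. A
 * typical use is a busy-wait loop, where the "memory" clobber forces
 * the flag to be reloaded from memory on every iteration instead of
 * being cached in a register. The variable name is hypothetical:
 *
 *     while (!done)
 *         barrier();
 */
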
#if defined(__i386__) || defined(__x86_64__)

/*
 * Because of the strongly ordered x86 storage model, wmb() is a nop
 * on x86 (a compiler barrier only), at least as long as qemu doesn't
 * do accesses to write-combining memory or non-temporal load/stores
 * from C code.
 */
#define smp_wmb()   barrier()

#elif defined(__powerpc__)

/*
 * We use an eieio() for a wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb()   asm volatile("eieio" ::: "memory")

#else

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier. This should be safe on all platforms, though it may
 * be overkill.
 */
#define smp_wmb()   __sync_synchronize()

#endif
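
/*
 * Illustrative usage sketch (not part of the original header): a
 * producer publishes a payload and then a ready flag; smp_wmb()
 * keeps the payload store ordered before the flag store on weakly
 * ordered hosts, so a consumer that sees ready == 1 also sees the
 * payload. The structure and field names are hypothetical:
 *
 *     ring->data[idx] = value;
 *     smp_wmb();
 *     ring->ready = 1;
 */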

#endif