#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")

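/*
 * Illustrative sketch only, compiled out with #if 0: barrier() constrains
 * the compiler, not the CPU, so it emits no fence instruction.  The names
 * 'flag' and 'wait_for_flag' below are hypothetical and exist purely for
 * this example; without the barrier (or a volatile access) the compiler
 * could hoist the load of 'flag' out of the polling loop.
 */
#if 0
static int flag;                    /* hypothetical: set by another thread */

static void wait_for_flag(void)
{
    while (!flag) {
        barrier();                  /* force the compiler to reload 'flag' */
    }
}
#endif
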
#if defined(__i386__)

/*
 * Because of the strongly ordered x86 storage model, wmb() and rmb() are
 * nops on x86 (well, a compiler barrier only), at least as long as qemu
 * doesn't do accesses to write-combining memory or non-temporal
 * load/stores from C code.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()
/*
 * We use the GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as 4.3.
 */
#if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
#endif

#elif defined(__x86_64__)

#define smp_wmb() barrier()
#define smp_rmb() barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
#define smp_rmb() asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb() asm volatile("sync" ::: "memory")
#endif

#define smp_mb() asm volatile("sync" ::: "memory")

#else

/*
 * For (host) platforms for which we don't have explicit barrier
 * definitions, we use the gcc __sync_synchronize() primitive to
 * generate a full barrier. This should be safe on all platforms,
 * though it may be overkill for wmb() and rmb().
 */
#define smp_wmb() __sync_synchronize()
#define smp_mb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()

#endif

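/*
 * Illustrative sketch only, compiled out with #if 0: the usual
 * producer/consumer pairing of smp_wmb() and smp_rmb().  All names below
 * (shared_data, shared_flag, produce, consume) are hypothetical and exist
 * purely for this example.  The producer orders its data store before the
 * flag store; the consumer orders the flag load before the data load.
 */
#if 0
static int shared_data;             /* hypothetical payload */
static int shared_flag;             /* hypothetical "data ready" flag */

static void produce(int value)
{
    shared_data = value;            /* write the payload first */
    smp_wmb();                      /* order the data store before the flag store */
    shared_flag = 1;                /* then publish it */
}

static int consume(void)
{
    while (!shared_flag) {          /* poll until the producer publishes */
        barrier();                  /* compiler-only: reload shared_flag */
    }
    smp_rmb();                      /* order the flag load before the data load */
    return shared_data;             /* now safe to read the payload */
}
#endif
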
#endif