/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
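/* Without 64-bit host atomics, the value is kept in two 32-bit halves
 * (s->low and s->high) protected by a small spinlock in s->lock:
 * bit 0 is the writer flag and every reader adds 2.  A writer only
 * acquires the lock when it is completely free; readers register
 * themselves first and then wait for any active writer to finish.
 */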
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

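/* Drop our reader reference; each reader counts as 2 in s->lock. */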
static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

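/* Try to become the single writer; this only succeeds while there are
 * no readers and no other writer (s->lock == 0).
 */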
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

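/* Clear the writer bit; readers that queued up in the meantime can
 * then proceed.
 */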
static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}

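/* Read the full 64-bit value.  Taking the read lock ensures we never
 * observe a 64-bit store that is only half done.
 */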
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}

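/* Store a full 64-bit value.  The write lock is held for the whole
 * store, so the two halves can be written in either order without
 * stat64_get observing a mix of old and new.
 */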
void stat64_set(Stat64 *s, uint64_t val)
{
    while (!stat64_wrtrylock(s)) {
        cpu_relax();
    }

    qatomic_set(&s->high, val >> 32);
    qatomic_set(&s->low, val);
    stat64_wrunlock(s);
}

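/* Slow path for additions whose low-word update may carry into the
 * high word; the lock-free fast path lives in the stats64.h header.
 * Returns false if the write lock is contended so the caller can retry.
 */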
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update. By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}

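/* Slow path for stat64_min(): update the value under the write lock.
 * Returns false if the lock is contended so that the caller can retry.
 */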
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low. The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}

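/* Slow path for stat64_max(): the mirror image of stat64_min_slow(). */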
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low. The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
#endif