/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/main-loop.h"
#include "system/cpus.h"
#include "system/cpu-throttle.h"
#include "migration.h"
#include "migration-stats.h"
#include "trace.h"

/* vcpu throttling controls */
static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
static unsigned int throttle_percentage;
static bool throttle_dirty_sync_timer_active;
static uint64_t throttle_dirty_sync_count_prev;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

/* Make sure the RAMBlock dirty bitmap is synchronized every five seconds */
#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000

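/*
 * Async work item queued on each vCPU: sleep away the portion of every
 * 10 ms timeslice that corresponds to the throttle percentage.
 *
 * With pct = throttle_percentage / 100, the sleep time per timeslice is
 * throttle_ratio * CPU_THROTTLE_TIMESLICE_NS, where
 * throttle_ratio = pct / (1 - pct).  E.g. at 75% the ratio is 3, so the
 * vCPU sleeps 30 ms for every 10 ms it runs, i.e. it is idle 75% of the
 * time.
 */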
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
    double pct;
    double throttle_ratio;
    int64_t sleeptime_ns, endtime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    /* Add 1ns to fix double's rounding error (like 0.9999999...) */
    sleeptime_ns = (int64_t)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS + 1);
    endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns;
    while (sleeptime_ns > 0 && !cpu->stop) {
        if (sleeptime_ns > SCALE_MS) {
            qemu_cond_timedwait_bql(cpu->halt_cond,
                                    sleeptime_ns / SCALE_MS);
        } else {
            bql_unlock();
            g_usleep(sleeptime_ns / SCALE_US);
            bql_lock();
        }
        sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
    qatomic_set(&cpu->throttle_thread_scheduled, 0);
}

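/*
 * Periodic timer callback: schedule cpu_throttle_thread() on every vCPU
 * (unless one is already pending), then re-arm the timer.  The period is
 * CPU_THROTTLE_TIMESLICE_NS / (1 - pct), i.e. one 10 ms run slice plus
 * the sleep computed in cpu_throttle_thread(); e.g. at 75% the timer
 * fires every 40 ms.
 */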
static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Throttling was disabled: stop the timer by not re-arming it */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread,
                             RUN_ON_CPU_NULL);
        }
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                              CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

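/*
 * Set the throttle percentage, clamped to [CPU_THROTTLE_PCT_MIN,
 * CPU_THROTTLE_PCT_MAX].  If throttling was previously inactive, kick
 * off the first timer tick immediately.
 */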
void cpu_throttle_set(int new_throttle_pct)
{
    /*
     * Remember whether the throttle was already active before
     * throttle_percentage is updated, so the timer is only kicked
     * off on the inactive -> active transition.
     */
    bool throttle_active = cpu_throttle_active();

    trace_cpu_throttle_set(new_throttle_pct);

    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    qatomic_set(&throttle_percentage, new_throttle_pct);

    if (!throttle_active) {
        cpu_throttle_timer_tick(NULL);
    }
}

void cpu_throttle_stop(void)
{
    qatomic_set(&throttle_percentage, 0);
    cpu_throttle_dirty_sync_timer(false);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return qatomic_read(&throttle_percentage);
}

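/*
 * Fires every CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS (5 s).  If the
 * migration code has not synchronized the dirty bitmap on its own since
 * the previous tick (dirty_sync_count unchanged), force a sync here so
 * that the dirty page information stays reasonably fresh.
 */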
void cpu_throttle_dirty_sync_timer_tick(void *opaque)
{
    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);

    /*
     * The first iteration copies all memory anyway and has no
     * effect on guest performance, so skip it to avoid paying
     * the sync penalty for nothing.
     */
    if (sync_cnt <= 1) {
        goto end;
    }

    if (sync_cnt == throttle_dirty_sync_count_prev) {
        trace_cpu_throttle_dirty_sync();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(false);
        }
    }

end:
    throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);

    timer_mod(throttle_dirty_sync_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
                  CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
}

static bool cpu_throttle_dirty_sync_active(void)
{
    return qatomic_read(&throttle_dirty_sync_timer_active);
}

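/*
 * Enable or disable the dirty sync timer.  Enabling is idempotent: the
 * timer is armed (and the sync count cache reset) only when it is not
 * already active; disabling deletes any pending timer.
 */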
void cpu_throttle_dirty_sync_timer(bool enable)
{
    assert(throttle_dirty_sync_timer);

    if (enable) {
        if (!cpu_throttle_dirty_sync_active()) {
            /*
             * Always reset the dirty sync count cache, in case a
             * previous migration was cancelled.
             */
            throttle_dirty_sync_count_prev = 0;
            timer_mod(throttle_dirty_sync_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
                          CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
            qatomic_set(&throttle_dirty_sync_timer_active, 1);
        }
    } else {
        if (cpu_throttle_dirty_sync_active()) {
            timer_del(throttle_dirty_sync_timer);
            qatomic_set(&throttle_dirty_sync_timer_active, 0);
        }
    }
}

void cpu_throttle_init(void)
{
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
    throttle_dirty_sync_timer =
        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                     cpu_throttle_dirty_sync_timer_tick, NULL);
}