ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 1 | /* |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 2 | * General purpose implementation of a simple periodic countdown timer. |
| 3 | * |
| 4 | * Copyright (c) 2007 CodeSourcery. |
| 5 | * |
Matthew Fernandez | 8e31bf3 | 2011-06-26 12:21:35 +1000 | [diff] [blame] | 6 | * This code is licensed under the GNU LGPL. |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 7 | */ |
Markus Armbruster | d645427 | 2019-08-12 07:23:45 +0200 | [diff] [blame] | 8 | |
Peter Maydell | 18c86e2 | 2016-01-26 18:17:29 +0000 | [diff] [blame] | 9 | #include "qemu/osdep.h" |
Paolo Bonzini | 83c9f4c | 2013-02-04 15:40:22 +0100 | [diff] [blame] | 10 | #include "hw/ptimer.h" |
Markus Armbruster | d645427 | 2019-08-12 07:23:45 +0200 | [diff] [blame] | 11 | #include "migration/vmstate.h" |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 12 | #include "qemu/host-utils.h" |
Philippe Mathieu-Daudé | 5b5968c | 2022-12-19 18:09:43 +0100 | [diff] [blame] | 13 | #include "exec/replay-core.h" |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 14 | #include "sysemu/cpu-timers.h" |
Dmitry Osipenko | 2a8b587 | 2016-09-22 18:13:07 +0100 | [diff] [blame] | 15 | #include "sysemu/qtest.h" |
Marc-André Lureau | 072bdb0 | 2017-01-27 12:55:51 +0400 | [diff] [blame] | 16 | #include "block/aio.h" |
Peter Maydell | ad140da | 2021-01-28 11:41:21 +0000 | [diff] [blame] | 17 | #include "hw/clock.h" |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 18 | |
Dmitry Osipenko | 22471b8 | 2016-10-24 16:26:51 +0100 | [diff] [blame] | 19 | #define DELTA_ADJUST 1 |
| 20 | #define DELTA_NO_ADJUST -1 |
Dmitry Osipenko | 2b5c032 | 2016-10-24 16:26:50 +0100 | [diff] [blame] | 21 | |
/*
 * State of one countdown timer.  The period is kept as 64.32 fixed-point
 * nanoseconds split across 'period' (integer ns) and 'period_frac'
 * (fractional ns, in units of 2^-32 ns).
 */
struct ptimer_state
{
    uint8_t enabled; /* 0 = disabled, 1 = periodic, 2 = oneshot. */
    uint64_t limit;       /* Value reloaded into the counter on wrap. */
    uint64_t delta;       /* Counts remaining until the next trigger. */
    uint32_t period_frac; /* Fractional part of the period (x / 2^32 ns). */
    int64_t period;       /* Integer ns part of the period. */
    int64_t last_event;   /* QEMU_CLOCK_VIRTUAL time of the last reload. */
    int64_t next_event;   /* QEMU_CLOCK_VIRTUAL deadline of the next tick. */
    uint8_t policy_mask;  /* Combination of PTIMER_POLICY_* bits. */
    QEMUTimer *timer;
    ptimer_cb callback;   /* Trigger callback; mandatory (see ptimer_init). */
    void *callback_opaque;
    /*
     * These track whether we're in a transaction block, and if we
     * need to do a timer reload when the block finishes. They don't
     * need to be migrated because migration can never happen in the
     * middle of a transaction block.
     */
    bool in_transaction;
    bool need_reload;
};
| 44 | |
/*
 * Fire the timer trigger by invoking the device callback directly.
 * Reentrancy from callbacks that poke the ptimer API again is handled
 * by the transaction begin/commit mechanism (see ptimer_tick), not by
 * deferring to a bottom half.
 */
static void ptimer_trigger(ptimer_state *s)
{
    s->callback(s->callback_opaque);
}
| 50 | |
/*
 * Reload the countdown from s->delta/s->limit (applying the configured
 * policies) and re-arm the underlying QEMUTimer relative to
 * s->next_event, which callers must have set to the reload origin.
 * May fire the trigger immediately for a zero delta, and may disable
 * the timer entirely on a zero period or unusable delta.
 */
static void ptimer_reload(ptimer_state *s, int delta_adjust)
{
    uint32_t period_frac;
    uint64_t period;
    uint64_t delta;
    bool suppress_trigger = false;

    /*
     * Note that if delta_adjust is 0 then we must be here because of
     * a count register write or timer start, not because of timer expiry.
     * In that case the policy might require us to suppress the timer trigger
     * that we would otherwise generate for a zero delta.
     */
    if (delta_adjust == 0 &&
        (s->policy_mask & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT)) {
        suppress_trigger = true;
    }
    if (s->delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)
        && !suppress_trigger) {
        ptimer_trigger(s);
    }

    /*
     * Note that ptimer_trigger() might call the device callback function,
     * which can then modify timer state, so we must not cache any fields
     * from ptimer_state until after we have called it.
     */
    delta = s->delta;
    period = s->period;
    period_frac = s->period_frac;

    if (delta == 0 && !(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) {
        delta = s->delta = s->limit;
    }

    if (s->period == 0) {
        if (!qtest_enabled()) {
            fprintf(stderr, "Timer with period zero, disabling\n");
        }
        timer_del(s->timer);
        s->enabled = 0;
        return;
    }

    /* Policy fixups: each may force a nonzero delta for the next period. */
    if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
        if (delta_adjust != DELTA_NO_ADJUST) {
            delta += delta_adjust;
        }
    }

    if (delta == 0 && (s->policy_mask & PTIMER_POLICY_CONTINUOUS_TRIGGER)) {
        if (s->enabled == 1 && s->limit == 0) {
            delta = 1;
        }
    }

    if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) {
        if (delta_adjust != DELTA_NO_ADJUST) {
            delta = 1;
        }
    }

    if (delta == 0 && (s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_RELOAD)) {
        if (s->enabled == 1 && s->limit != 0) {
            delta = 1;
        }
    }

    if (delta == 0) {
        if (s->enabled == 0) {
            /* trigger callback disabled the timer already */
            return;
        }
        if (!qtest_enabled()) {
            fprintf(stderr, "Timer with delta zero, disabling\n");
        }
        timer_del(s->timer);
        s->enabled = 0;
        return;
    }

    /*
     * Artificially limit timeout rate to something
     * achievable under QEMU.  Otherwise, QEMU spends all
     * its time generating timer interrupts, and there
     * is no forward progress.
     * About ten microseconds is the fastest that really works
     * on the current generation of host machines.
     */

    if (s->enabled == 1 && (delta * period < 10000) &&
        !icount_enabled() && !qtest_enabled()) {
        period = 10000 / delta;
        period_frac = 0;
    }

    s->last_event = s->next_event;
    /* Deadline: delta counts at period.period_frac (64.32 fixed-point) ns. */
    s->next_event = s->last_event + delta * period;
    if (period_frac) {
        s->next_event += ((int64_t)period_frac * delta) >> 32;
    }
    timer_mod(s->timer, s->next_event);
}
| 154 | |
/*
 * QEMUTimer expiry callback: fire the trigger and, in periodic mode,
 * reload the counter from the limit and re-arm the timer.
 */
static void ptimer_tick(void *opaque)
{
    ptimer_state *s = (ptimer_state *)opaque;
    bool trigger = true;

    /*
     * We perform all the tick actions within a begin/commit block
     * because the callback function that ptimer_trigger() calls
     * might make calls into the ptimer APIs that provoke another
     * trigger, and we want that to cause the callback function
     * to be called iteratively, not recursively.
     */
    ptimer_transaction_begin(s);

    if (s->enabled == 2) {
        /* Oneshot mode: this expiry stops the timer. */
        s->delta = 0;
        s->enabled = 0;
    } else {
        int delta_adjust = DELTA_ADJUST;

        if (s->delta == 0 || s->limit == 0) {
            /* If a "continuous trigger" policy is not used and limit == 0,
               we should error out. delta == 0 means that this tick is
               caused by a "no immediate reload" policy, so it shouldn't
               be adjusted. */
            delta_adjust = DELTA_NO_ADJUST;
        }

        if (!(s->policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER)) {
            /* Avoid re-trigger on deferred reload if "no immediate trigger"
               policy isn't used. */
            trigger = (delta_adjust == DELTA_ADJUST);
        }

        s->delta = s->limit;

        ptimer_reload(s, delta_adjust);
    }

    if (trigger) {
        ptimer_trigger(s);
    }

    ptimer_transaction_commit(s);
}
| 200 | |
/*
 * Return the current counter value.  For a running timer this is
 * interpolated from the remaining time until s->next_event on
 * QEMU_CLOCK_VIRTUAL; for a stopped timer (or one with delta == 0)
 * the latched s->delta is returned as-is.
 */
uint64_t ptimer_get_count(ptimer_state *s)
{
    uint64_t counter;

    if (s->enabled && s->delta != 0) {
        int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        int64_t next = s->next_event;
        int64_t last = s->last_event;
        bool expired = (now - next >= 0);
        bool oneshot = (s->enabled == 2);

        /* Figure out the current counter value. */
        if (expired) {
            /* Prevent timer underflowing if it should already have
               triggered. */
            counter = 0;
        } else {
            uint64_t rem;
            uint64_t div;
            int clz1, clz2;
            int shift;
            uint32_t period_frac = s->period_frac;
            uint64_t period = s->period;

            /* Mirror the artificial rate limit applied in ptimer_reload()
               so that the interpolated count matches the armed deadline. */
            if (!oneshot && (s->delta * period < 10000) &&
                !icount_enabled() && !qtest_enabled()) {
                period = 10000 / s->delta;
                period_frac = 0;
            }

            /* We need to divide time by period, where time is stored in
               rem (64-bit integer) and period is stored in period/period_frac
               (64.32 fixed point).

               Doing full precision division is hard, so scale values and
               do a 64-bit division.  The result should be rounded down,
               so that the rounding error never causes the timer to go
               backwards.
            */

            rem = next - now;
            div = period;

            /* Scale both operands up by the largest shift that fits. */
            clz1 = clz64(rem);
            clz2 = clz64(div);
            shift = clz1 < clz2 ? clz1 : clz2;

            rem <<= shift;
            div <<= shift;
            if (shift >= 32) {
                div |= ((uint64_t)period_frac << (shift - 32));
            } else {
                if (shift != 0)
                    div |= (period_frac >> (32 - shift));
                /* Look at remaining bits of period_frac and round div up if
                   necessary. */
                if ((uint32_t)(period_frac << shift))
                    div += 1;
            }
            counter = rem / div;

            if (s->policy_mask & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
                /* Before wrapping around, timer should stay with counter = 0
                   for a one period. */
                if (!oneshot && s->delta == s->limit) {
                    if (now == last) {
                        /* Counter == delta here, check whether it was
                           adjusted and if it was, then right now it is
                           that "one period". */
                        if (counter == s->limit + DELTA_ADJUST) {
                            return 0;
                        }
                    } else if (counter == s->limit) {
                        /* Since the counter is rounded down and now != last,
                           the counter == limit means that delta was adjusted
                           by +1 and right now it is that adjusted period. */
                        return 0;
                    }
                }
            }
        }

        if (s->policy_mask & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN) {
            /* If now == last then delta == limit, i.e. the counter already
               represents the correct value. It would be rounded down a 1ns
               later. */
            if (now != last) {
                counter += 1;
            }
        }
    } else {
        counter = s->delta;
    }
    return counter;
}
| 296 | |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 297 | void ptimer_set_count(ptimer_state *s, uint64_t count) |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 298 | { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 299 | assert(s->in_transaction); |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 300 | s->delta = count; |
| 301 | if (s->enabled) { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 302 | s->need_reload = true; |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 303 | } |
| 304 | } |
| 305 | |
| 306 | void ptimer_run(ptimer_state *s, int oneshot) |
| 307 | { |
Dmitry Osipenko | 869e92b | 2016-06-06 16:59:31 +0100 | [diff] [blame] | 308 | bool was_disabled = !s->enabled; |
| 309 | |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 310 | assert(s->in_transaction); |
Peter Maydell | 78b6eaa | 2019-10-08 18:17:21 +0100 | [diff] [blame] | 311 | |
Dmitry Osipenko | 869e92b | 2016-06-06 16:59:31 +0100 | [diff] [blame] | 312 | if (was_disabled && s->period == 0) { |
Dmitry Osipenko | 2a8b587 | 2016-09-22 18:13:07 +0100 | [diff] [blame] | 313 | if (!qtest_enabled()) { |
| 314 | fprintf(stderr, "Timer with period zero, disabling\n"); |
| 315 | } |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 316 | return; |
| 317 | } |
| 318 | s->enabled = oneshot ? 2 : 1; |
Dmitry Osipenko | 869e92b | 2016-06-06 16:59:31 +0100 | [diff] [blame] | 319 | if (was_disabled) { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 320 | s->need_reload = true; |
Dmitry Osipenko | 869e92b | 2016-06-06 16:59:31 +0100 | [diff] [blame] | 321 | } |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 322 | } |
| 323 | |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 324 | /* Pause a timer. Note that this may cause it to "lose" time, even if it |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 325 | is immediately restarted. */ |
| 326 | void ptimer_stop(ptimer_state *s) |
| 327 | { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 328 | assert(s->in_transaction); |
Peter Maydell | 78b6eaa | 2019-10-08 18:17:21 +0100 | [diff] [blame] | 329 | |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 330 | if (!s->enabled) |
| 331 | return; |
| 332 | |
| 333 | s->delta = ptimer_get_count(s); |
Alex Bligh | bc72ad6 | 2013-08-21 16:03:08 +0100 | [diff] [blame] | 334 | timer_del(s->timer); |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 335 | s->enabled = 0; |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 336 | s->need_reload = false; |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 337 | } |
| 338 | |
| 339 | /* Set counter increment interval in nanoseconds. */ |
| 340 | void ptimer_set_period(ptimer_state *s, int64_t period) |
| 341 | { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 342 | assert(s->in_transaction); |
Dmitry Osipenko | 7ef6e3c | 2016-06-06 16:59:30 +0100 | [diff] [blame] | 343 | s->delta = ptimer_get_count(s); |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 344 | s->period = period; |
| 345 | s->period_frac = 0; |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 346 | if (s->enabled) { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 347 | s->need_reload = true; |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 348 | } |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 349 | } |
| 350 | |
Peter Maydell | ad140da | 2021-01-28 11:41:21 +0000 | [diff] [blame] | 351 | /* Set counter increment interval from a Clock */ |
| 352 | void ptimer_set_period_from_clock(ptimer_state *s, const Clock *clk, |
| 353 | unsigned int divisor) |
| 354 | { |
| 355 | /* |
| 356 | * The raw clock period is a 64-bit value in units of 2^-32 ns; |
| 357 | * put another way it's a 32.32 fixed-point ns value. Our internal |
| 358 | * representation of the period is 64.32 fixed point ns, so |
| 359 | * the conversion is simple. |
| 360 | */ |
| 361 | uint64_t raw_period = clock_get(clk); |
| 362 | uint64_t period_frac; |
| 363 | |
| 364 | assert(s->in_transaction); |
| 365 | s->delta = ptimer_get_count(s); |
| 366 | s->period = extract64(raw_period, 32, 32); |
| 367 | period_frac = extract64(raw_period, 0, 32); |
| 368 | /* |
| 369 | * divisor specifies a possible frequency divisor between the |
| 370 | * clock and the timer, so it is a multiplier on the period. |
| 371 | * We do the multiply after splitting the raw period out into |
| 372 | * period and frac to avoid having to do a 32*64->96 multiply. |
| 373 | */ |
| 374 | s->period *= divisor; |
| 375 | period_frac *= divisor; |
| 376 | s->period += extract64(period_frac, 32, 32); |
| 377 | s->period_frac = (uint32_t)period_frac; |
| 378 | |
| 379 | if (s->enabled) { |
| 380 | s->need_reload = true; |
| 381 | } |
| 382 | } |
| 383 | |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 384 | /* Set counter frequency in Hz. */ |
| 385 | void ptimer_set_freq(ptimer_state *s, uint32_t freq) |
| 386 | { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 387 | assert(s->in_transaction); |
Dmitry Osipenko | 7ef6e3c | 2016-06-06 16:59:30 +0100 | [diff] [blame] | 388 | s->delta = ptimer_get_count(s); |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 389 | s->period = 1000000000ll / freq; |
| 390 | s->period_frac = (1000000000ll << 32) / freq; |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 391 | if (s->enabled) { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 392 | s->need_reload = true; |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 393 | } |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 394 | } |
| 395 | |
| 396 | /* Set the initial countdown value. If reload is nonzero then also set |
| 397 | count = limit. */ |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 398 | void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload) |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 399 | { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 400 | assert(s->in_transaction); |
pbrook | 423f074 | 2007-05-23 00:06:54 +0000 | [diff] [blame] | 401 | s->limit = limit; |
| 402 | if (reload) |
| 403 | s->delta = limit; |
pbrook | 62ea5b0 | 2007-06-03 10:44:47 +0000 | [diff] [blame] | 404 | if (s->enabled && reload) { |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 405 | s->need_reload = true; |
blueswir1 | 8d05ea8 | 2007-05-24 19:48:41 +0000 | [diff] [blame] | 406 | } |
| 407 | } |
| 408 | |
/* Return the current limit (reload) value. */
uint64_t ptimer_get_limit(ptimer_state *s)
{
    return s->limit;
}
| 413 | |
/*
 * Enter a transaction block.  Any timer reload implied by API calls
 * made inside the block is deferred until ptimer_transaction_commit().
 */
void ptimer_transaction_begin(ptimer_state *s)
{
    /* Transaction blocks must not nest. */
    assert(!s->in_transaction);
    s->in_transaction = true;
    s->need_reload = false;
}
| 420 | |
/*
 * Leave a transaction block, performing any timer reload that the API
 * calls made inside the block requested.
 */
void ptimer_transaction_commit(ptimer_state *s)
{
    assert(s->in_transaction);
    /*
     * We must loop here because ptimer_reload() can call the callback
     * function, which might then update ptimer state in a way that
     * means we need to do another reload and possibly another callback.
     * A disabled timer never needs reloading (and if we don't check
     * this then we loop forever if ptimer_reload() disables the timer).
     */
    while (s->need_reload && s->enabled) {
        s->need_reload = false;
        s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        ptimer_reload(s, 0);
    }
    /* Now we've finished reload we can leave the transaction block. */
    s->in_transaction = false;
}
| 439 | |
/*
 * Migration description for ptimer state.  in_transaction and
 * need_reload are deliberately absent: migration never happens in the
 * middle of a transaction block (see the struct's field comments).
 */
const VMStateDescription vmstate_ptimer = {
    .name = "ptimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(enabled, ptimer_state),
        VMSTATE_UINT64(limit, ptimer_state),
        VMSTATE_UINT64(delta, ptimer_state),
        VMSTATE_UINT32(period_frac, ptimer_state),
        VMSTATE_INT64(period, ptimer_state),
        VMSTATE_INT64(last_event, ptimer_state),
        VMSTATE_INT64(next_event, ptimer_state),
        VMSTATE_TIMER_PTR(timer, ptimer_state),
        VMSTATE_END_OF_LIST()
    }
};
| 456 | |
Peter Maydell | 78b6eaa | 2019-10-08 18:17:21 +0100 | [diff] [blame] | 457 | ptimer_state *ptimer_init(ptimer_cb callback, void *callback_opaque, |
| 458 | uint8_t policy_mask) |
| 459 | { |
| 460 | ptimer_state *s; |
| 461 | |
Peter Maydell | af2a580 | 2019-11-11 13:44:16 +0000 | [diff] [blame] | 462 | /* The callback function is mandatory. */ |
Peter Maydell | 78b6eaa | 2019-10-08 18:17:21 +0100 | [diff] [blame] | 463 | assert(callback); |
| 464 | |
| 465 | s = g_new0(ptimer_state, 1); |
| 466 | s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ptimer_tick, s); |
| 467 | s->policy_mask = policy_mask; |
| 468 | s->callback = callback; |
| 469 | s->callback_opaque = callback_opaque; |
| 470 | |
| 471 | /* |
| 472 | * These two policies are incompatible -- trigger-on-decrement implies |
| 473 | * a timer trigger when the count becomes 0, but no-immediate-trigger |
| 474 | * implies a trigger when the count stops being 0. |
| 475 | */ |
| 476 | assert(!((policy_mask & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) && |
| 477 | (policy_mask & PTIMER_POLICY_NO_IMMEDIATE_TRIGGER))); |
| 478 | return s; |
| 479 | } |
| 480 | |
/* Free a ptimer created by ptimer_init(), including its QEMUTimer. */
void ptimer_free(ptimer_state *s)
{
    timer_free(s->timer);
    g_free(s);
}