/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#ifndef _WIN32
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace/control.h"
#include "trace/simple.h"
#include "qemu/error-report.h"

/** Trace file header event ID, picked to avoid conflict with real event IDs */
#define HEADER_EVENT_ID (~(uint64_t)0)

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 4

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static CompatGMutex trace_lock;
static CompatGCond trace_available_cond;
static CompatGCond trace_empty_cond;

static bool trace_available;
static bool trace_writeout_enabled;

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

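/*
 * Ring buffer state (a rough summary of how the code below uses these
 * variables): trace_record_start() atomically advances trace_idx to claim
 * space for a new record, writeout_idx tracks how far the writeout thread has
 * consumed, and dropped_events counts records that did not fit in the buffer.
 */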
uint8_t trace_buf[TRACE_BUF_LEN];
static volatile gint trace_idx;
static unsigned int writeout_idx;
static volatile gint dropped_events;
static uint32_t trace_pid;
static FILE *trace_fp;
static char *trace_file_name;

#define TRACE_RECORD_TYPE_MAPPING 0
#define TRACE_RECORD_TYPE_EVENT 1

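/*
 * Illustrative sketch of the on-disk layout produced by this file (not a
 * formal specification):
 *
 *   TraceLogHeader                      - written by st_set_trace_file_enabled()
 *   repeated mapping records            - TRACE_RECORD_TYPE_MAPPING (u64),
 *                                         event ID (u64), name length (u32),
 *                                         name bytes; see st_write_event_mapping()
 *   repeated event records              - TRACE_RECORD_TYPE_EVENT (u64),
 *                                         then a TraceRecord plus its arguments;
 *                                         see writeout_thread()
 */
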
/** Trace buffer entry */
typedef struct {
    uint64_t event; /* event ID value */
    uint64_t timestamp_ns;
    uint32_t length;   /* in bytes */
    uint32_t pid;
    uint64_t arguments[];
} TraceRecord;

typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC    */
    uint64_t header_version;  /* HEADER_VERSION  */
} TraceLogHeader;


static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);

static void clear_buffer_range(unsigned int idx, size_t len)
{
    uint32_t num = 0;
    while (num < len) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = 0;
        num++;
    }
}

/**
 * Read a trace record from the trace buffer
 *
 * @idx         Trace buffer index
 * @recordptr   Points to a malloc'd copy of the record on success; the caller
 *              must free() it
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if it's a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for the consumed record, otherwise any byte
     * with its MSB set may be considered a valid event ID when the writer
     * thread crosses this range of the buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait        Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(&trace_available_cond);

    if (wait) {
        g_cond_wait(&trace_empty_cond, &trace_lock);
    }

    g_mutex_unlock(&trace_lock);
}

static void wait_for_trace_records_available(void)
{
    g_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(&trace_empty_cond);
        g_cond_wait(&trace_available_cond, &trace_lock);
    }
    trace_available = false;
    g_mutex_unlock(&trace_lock);
}

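/**
 * Writeout thread body
 *
 * Waits until records are available, emits a synthetic DROPPED_EVENT_ID record
 * if any events were dropped, then copies valid records out of the ring buffer
 * into the trace file and flushes it.
 */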
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused));
    uint64_t type = TRACE_RECORD_TYPE_EVENT;

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t);
            dropped.rec.pid = trace_pid;
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* don't use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}

void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void *)s, slen);
}

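/*
 * A complete record is emitted by calling trace_record_start(), then
 * trace_record_write_u64()/trace_record_write_str() for each argument, then
 * trace_record_finish().  In QEMU this sequence is produced by generated
 * tracepoint code; the sketch below is only an illustration and event_id,
 * arg0 and arg1 are placeholder names:
 *
 *     TraceBufferRecord rec;
 *     if (trace_record_start(&rec, event_id, 2 * sizeof(uint64_t)) == 0) {
 *         trace_record_write_u64(&rec, arg0);
 *         trace_record_write_u64(&rec, arg1);
 *         trace_record_finish(&rec);
 *     }
 */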
int trace_record_start(TraceBufferRecord *rec, uint32_t event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace buffer full, event dropped! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}

static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        data_ptr[x++] = trace_buf[idx++];
    }
}

static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = data_ptr[x++];
    }
    return idx; /* most callers want to know where to write next */
}

void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}

static int st_write_event_mapping(void)
{
    uint64_t type = TRACE_RECORD_TYPE_MAPPING;
    TraceEventIter iter;
    TraceEvent *ev;

    trace_event_iter_init(&iter, NULL);
    while ((ev = trace_event_iter_next(&iter)) != NULL) {
        uint64_t id = trace_event_get_id(ev);
        const char *name = trace_event_get_name(ev);
        uint32_t len = strlen(name);
        if (fwrite(&type, sizeof(type), 1, trace_fp) != 1 ||
            fwrite(&id, sizeof(id), 1, trace_fp) != 1 ||
            fwrite(&len, sizeof(len), 1, trace_fp) != 1 ||
            fwrite(name, len, 1, trace_fp) != 1) {
            return -1;
        }
    }

    return 0;
}

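/**
 * Enable or disable trace file output
 *
 * Quiesces the writeout thread, then either opens the trace file and writes
 * the log header plus the event-name mapping, or closes the file.
 */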
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1 ||
            st_write_event_mapping() < 0) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file        The trace file name or NULL for the default name-<pid> set at
 *              config time
 */
void st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    g_free(trace_file_name);

    if (!file) {
        /* Type cast needed for Windows where getpid() returns an int. */
        trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid());
    } else {
        trace_file_name = g_strdup_printf("%s", file);
    }

    st_set_trace_file_enabled(true);
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

/* Helper function to create a thread with signals blocked.  Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer.  Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif

    thread = g_thread_new("trace-thread", fn, NULL);

#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

bool st_init(void)
{
    GThread *thread;

    trace_pid = getpid();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        warn_report("unable to initialize simple trace backend");
        return false;
    }

    atexit(st_flush_trace_buffer);
    return true;
}