/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#ifndef _WIN32
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace/control.h"
#include "trace/simple.h"

/** Trace file header event ID, picked to avoid conflict with real event IDs */
#define HEADER_EVENT_ID (~(uint64_t)0)

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 4

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

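/*
 * Note that the most significant bit of the 64-bit event field doubles as the
 * TRACE_RECORD_VALID flag, so real event IDs must stay below 2^63; the two
 * topmost ID values above (HEADER_EVENT_ID and DROPPED_EVENT_ID) are reserved
 * for the file header and the dropped-records marker.
 */
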
/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static CompatGMutex trace_lock;
static CompatGCond trace_available_cond;
static CompatGCond trace_empty_cond;

static bool trace_available;
static bool trace_writeout_enabled;

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};
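
/*
 * Note: the ring buffer below is 256 KiB (4096 * 64 bytes) and the writeout
 * thread is kicked once more than a quarter of it is waiting to be written.
 */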

uint8_t trace_buf[TRACE_BUF_LEN];
static volatile gint trace_idx;
static unsigned int writeout_idx;
static volatile gint dropped_events;
static uint32_t trace_pid;
static FILE *trace_fp;
static char *trace_file_name;

#define TRACE_RECORD_TYPE_MAPPING 0
#define TRACE_RECORD_TYPE_EVENT 1

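/*
 * On disk the trace stream consists of a TraceLogHeader followed by framed
 * records: each record starts with a 64-bit type word (one of the
 * TRACE_RECORD_TYPE_* values above), followed by either an event-ID/name
 * mapping (see st_write_event_mapping()) or a TraceRecord as defined below.
 */
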
/** Trace buffer entry */
typedef struct {
    uint64_t event; /* event ID value */
    uint64_t timestamp_ns;
    uint32_t length; /* in bytes */
    uint32_t pid;
    uint64_t arguments[];
} TraceRecord;
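
/*
 * The variable-length arguments[] area holds one 64-bit slot per integer
 * argument; string arguments are stored inline as a 32-bit length followed
 * by the string bytes (see trace_record_write_str() below).
 */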

typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic;    /* HEADER_MAGIC    */
    uint64_t header_version;  /* HEADER_VERSION  */
} TraceLogHeader;


static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);

static void clear_buffer_range(unsigned int idx, size_t len)
{
    uint32_t num = 0;
    while (num < len) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = 0;
        num++;
    }
}

/**
 * Read a trace record from the trace buffer
 *
 * @idx Trace buffer index
 * @record Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if it's a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    *recordptr = malloc(record.length); /* don't use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for consumed record otherwise any byte
     * with its MSB set may be considered as a valid event id when the writer
     * thread crosses this range of buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(&trace_available_cond);

    if (wait) {
        g_cond_wait(&trace_empty_cond, &trace_lock);
    }

    g_mutex_unlock(&trace_lock);
}

static void wait_for_trace_records_available(void)
{
    g_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(&trace_empty_cond);
        g_cond_wait(&trace_available_cond, &trace_lock);
    }
    trace_available = false;
    g_mutex_unlock(&trace_lock);
}
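
/*
 * A note on the handshake above: flush_trace_file() wakes the writeout thread
 * via trace_available_cond, and a caller that passed wait=true then sleeps on
 * trace_empty_cond, which the writeout thread signals whenever it has no more
 * work to do (or writeout is disabled) and goes back to waiting.
 */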

static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused));
    uint64_t type = TRACE_RECORD_TYPE_EVENT;

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            dropped.rec.event = DROPPED_EVENT_ID;
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t);
            dropped.rec.pid = trace_pid;
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(&type, sizeof(type), 1, trace_fp);
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* don't use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
    rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}

void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
    /* Write string length first */
    rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
    /* Write actual string now */
    rec->rec_off = write_to_buffer(rec->rec_off, (void *)s, slen);
}

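/*
 * Records are emitted in two steps: trace_record_start() atomically reserves
 * space in the ring buffer and fills in the fixed header fields, and
 * trace_record_finish() later sets TRACE_RECORD_VALID so that the writeout
 * thread only ever consumes fully written records.
 */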
int trace_record_start(TraceBufferRecord *rec, uint32_t event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t event_u64 = event;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace buffer full, event dropped! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event_u64, sizeof(event_u64));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));
    rec_off = write_to_buffer(rec_off, &trace_pid, sizeof(trace_pid));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}

static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        data_ptr[x++] = trace_buf[idx++];
    }
}

static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
{
    uint8_t *data_ptr = dataptr;
    uint32_t x = 0;
    while (x < size) {
        if (idx >= TRACE_BUF_LEN) {
            idx = idx % TRACE_BUF_LEN;
        }
        trace_buf[idx++] = data_ptr[x++];
    }
    return idx; /* most callers want to know where to write next */
}

void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
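
/*
 * A minimal sketch of how a caller might drive the API above; the event ID
 * and argument values are hypothetical, for illustration only:
 *
 *     TraceBufferRecord rec;
 *     if (trace_record_start(&rec, event_id, 2 * sizeof(uint64_t)) == 0) {
 *         trace_record_write_u64(&rec, arg0);
 *         trace_record_write_u64(&rec, arg1);
 *         trace_record_finish(&rec);
 *     }
 */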

static int st_write_event_mapping(void)
{
    uint64_t type = TRACE_RECORD_TYPE_MAPPING;
    TraceEventIter iter;
    TraceEvent *ev;

    trace_event_iter_init(&iter, NULL);
    while ((ev = trace_event_iter_next(&iter)) != NULL) {
        uint64_t id = trace_event_get_id(ev);
        const char *name = trace_event_get_name(ev);
        uint32_t len = strlen(name);
        if (fwrite(&type, sizeof(type), 1, trace_fp) != 1 ||
            fwrite(&id, sizeof(id), 1, trace_fp) != 1 ||
            fwrite(&len, sizeof(len), 1, trace_fp) != 1 ||
            fwrite(name, len, 1, trace_fp) != 1) {
            return -1;
        }
    }

    return 0;
}
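
/*
 * The mapping records written above are what let trace file readers translate
 * the numeric event ID in each event record back into an event name.
 */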

void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1 ||
            st_write_event_mapping() < 0) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file The trace file name or NULL for the default name-<pid> set at
 *       config time
 */
void st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    g_free(trace_file_name);

    if (!file) {
        /* Type cast needed for Windows where getpid() returns an int. */
        trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid());
    } else {
        trace_file_name = g_strdup_printf("%s", file);
    }

    st_set_trace_file_enabled(true);
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif

    thread = g_thread_new("trace-thread", fn, NULL);

#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

bool st_init(void)
{
    GThread *thread;

    trace_pid = getpid();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    return true;
}