/*
 * CXL Event processing
 *
 * Copyright(C) 2023 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"

/* Artificial limit on the number of events a log can hold */
#define CXL_TEST_EVENT_OVERFLOW 8

static void reset_overflow(CXLEventLog *log)
{
    log->overflow_err_count = 0;
    log->first_overflow_timestamp = 0;
    log->last_overflow_timestamp = 0;
}

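/*
 * Initialize each event log: reset handles and overflow state, disable
 * interrupts, and assign message numbers sequentially from start_msg_num.
 */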
void cxl_event_init(CXLDeviceState *cxlds, int start_msg_num)
{
    CXLEventLog *log;
    int i;

    for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
        log = &cxlds->event_logs[i];
        log->next_handle = 1;
        log->overflow_err_count = 0;
        log->first_overflow_timestamp = 0;
        log->last_overflow_timestamp = 0;
        log->irq_enabled = false;
        log->irq_vec = start_msg_num++;
        qemu_mutex_init(&log->lock);
        QSIMPLEQ_INIT(&log->events);
    }

    /* Override -- Dynamic Capacity uses the same vector as info */
    cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP].irq_vec =
        cxlds->event_logs[CXL_EVENT_TYPE_INFO].irq_vec;
}

static CXLEvent *cxl_event_get_head(CXLEventLog *log)
{
    return QSIMPLEQ_FIRST(&log->events);
}

static CXLEvent *cxl_event_get_next(CXLEvent *entry)
{
    return QSIMPLEQ_NEXT(entry, node);
}

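/*
 * Count records by walking the queue; used for the overflow check and to
 * detect the 0 -> 1 transition on insert.
 */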
static int cxl_event_count(CXLEventLog *log)
{
    CXLEvent *event;
    int rc = 0;

    QSIMPLEQ_FOREACH(event, &log->events, node) {
        rc++;
    }

    return rc;
}

static bool cxl_event_empty(CXLEventLog *log)
{
    return QSIMPLEQ_EMPTY(&log->events);
}

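/*
 * Drop the oldest record, clear any recorded overflow state, and clear the
 * log's status bit once the last record is gone.
 */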
static void cxl_event_delete_head(CXLDeviceState *cxlds,
                                  CXLEventLogType log_type,
                                  CXLEventLog *log)
{
    CXLEvent *entry = cxl_event_get_head(log);

    reset_overflow(log);
    QSIMPLEQ_REMOVE_HEAD(&log->events, node);
    if (cxl_event_empty(log)) {
        cxl_event_set_status(cxlds, log_type, false);
    }
    g_free(entry);
}

/*
 * Return true if an interrupt should be generated as a result
 * of inserting this event.
 */
bool cxl_event_insert(CXLDeviceState *cxlds, CXLEventLogType log_type,
                      CXLEventRecordRaw *event)
{
    uint64_t time;
    CXLEventLog *log;
    CXLEvent *entry;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return false;
    }

    time = cxl_device_get_timestamp(cxlds);

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);

    if (cxl_event_count(log) >= CXL_TEST_EVENT_OVERFLOW) {
        if (log->overflow_err_count == 0) {
            log->first_overflow_timestamp = time;
        }
        log->overflow_err_count++;
        log->last_overflow_timestamp = time;
        return false;
    }

    entry = g_new0(CXLEvent, 1);

    memcpy(&entry->data, event, sizeof(*event));

    entry->data.hdr.handle = cpu_to_le16(log->next_handle);
    log->next_handle++;
    /* A handle of 0 is never valid */
    if (log->next_handle == 0) {
        log->next_handle++;
    }
    entry->data.hdr.timestamp = cpu_to_le64(time);

    QSIMPLEQ_INSERT_TAIL(&log->events, entry, node);
    cxl_event_set_status(cxlds, log_type, true);

    /* Count went from 0 to 1 */
    return cxl_event_count(log) == 1;
}

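/* Drop every queued record from all of the device's event logs */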
void cxl_discard_all_event_records(CXLDeviceState *cxlds)
{
    CXLEventLogType log_type;
    CXLEventLog *log;

    for (log_type = 0; log_type < CXL_EVENT_TYPE_MAX; log_type++) {
        log = &cxlds->event_logs[log_type];
        while (!cxl_event_empty(log)) {
            cxl_event_delete_head(cxlds, log_type, log);
        }
    }
}

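/*
 * Copy up to max_recs records from the head of the log into the Get Event
 * Records payload, setting the More Records and Overflow flags as needed.
 * Records are not removed here; clearing is a separate operation.
 */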
CXLRetCode cxl_event_get_records(CXLDeviceState *cxlds, CXLGetEventPayload *pl,
                                 uint8_t log_type, int max_recs,
                                 size_t *len)
{
    CXLEventLog *log;
    CXLEvent *entry;
    uint16_t nr;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);

    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < max_recs; nr++) {
        memcpy(&pl->records[nr], &entry->data, CXL_EVENT_RECORD_SIZE);
        entry = cxl_event_get_next(entry);
    }

    if (!cxl_event_empty(log)) {
        pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
    }

    if (log->overflow_err_count) {
        pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
        pl->overflow_err_count = cpu_to_le16(log->overflow_err_count);
        pl->first_overflow_timestamp =
            cpu_to_le64(log->first_overflow_timestamp);
        pl->last_overflow_timestamp =
            cpu_to_le64(log->last_overflow_timestamp);
    }

    pl->record_count = cpu_to_le16(nr);
    *len = CXL_EVENT_PAYLOAD_HDR_SIZE + (CXL_EVENT_RECORD_SIZE * nr);

    return CXL_MBOX_SUCCESS;
}

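/*
 * Validate the handles in the Clear Event Records payload against the head
 * of the log, then delete the matching records from oldest to newest.
 */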
CXLRetCode cxl_event_clear_records(CXLDeviceState *cxlds,
                                   CXLClearEventPayload *pl)
{
    CXLEventLog *log;
    uint8_t log_type;
    CXLEvent *entry;
    int nr;

    log_type = pl->event_log;

    if (log_type >= CXL_EVENT_TYPE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log = &cxlds->event_logs[log_type];

    QEMU_LOCK_GUARD(&log->lock);
    /*
     * Must iterate the queue twice.
     * "The device shall verify the event record handles specified in the input
     * payload are in temporal order. If the device detects an older event
     * record that will not be cleared when Clear Event Records is executed,
     * the device shall return the Invalid Handle return code and shall not
     * clear any of the specified event records."
     *   -- CXL r3.1 Section 8.2.9.2.3: Clear Event Records (0101h)
     */
    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < pl->nr_recs; nr++) {
        uint16_t handle = pl->handle[nr];

        /* NOTE: Both handles are little endian. */
        if (handle == 0 || entry->data.hdr.handle != handle) {
            return CXL_MBOX_INVALID_INPUT;
        }
        entry = cxl_event_get_next(entry);
    }

    entry = cxl_event_get_head(log);
    for (nr = 0; entry && nr < pl->nr_recs; nr++) {
        cxl_event_delete_head(cxlds, log_type, log);
        entry = cxl_event_get_head(log);
    }

    return CXL_MBOX_SUCCESS;
}

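/*
 * Assert the MSI/MSI-X vector for every log that has interrupts enabled and
 * at least one pending record.
 */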
void cxl_event_irq_assert(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxlds = &ct3d->cxl_dstate;
    PCIDevice *pdev = &ct3d->parent_obj;
    int i;

    for (i = 0; i < CXL_EVENT_TYPE_MAX; i++) {
        CXLEventLog *log = &cxlds->event_logs[i];

        if (!log->irq_enabled || cxl_event_empty(log)) {
            continue;
        }

        /* Notify via MSI-X or MSI; legacy IRQ is not supported */
        if (msix_enabled(pdev)) {
            msix_notify(pdev, log->irq_vec);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, log->irq_vec);
        }
    }
}