/*
 * NUMA parameter parsing routines
 *
 * Copyright (c) 2014 Fujitsu Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-visit-machine.h"
#include "sysemu/qtest.h"
#include "hw/core/cpu.h"
#include "hw/mem/pc-dimm.h"
#include "migration/vmstate.h"
#include "hw/boards.h"
#include "hw/mem/memory-device.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"

QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

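/*
 * The implied "type" key selects which form of the -numa option is being
 * parsed, e.g.:
 *   -numa node,nodeid=0,cpus=0-3,memdev=m0
 *   -numa dist,src=0,dst=1,val=20
 *   -numa cpu,node-id=0,socket-id=0
 *   -numa hmat-lb,... / -numa hmat-cache,...
 * set_numa_options() below dispatches on that type.
 */
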
static int have_memdevs;

bool numa_uses_legacy_mem(void)
{
    return !have_memdevs;
}

static int have_mem;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */

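/*
 * Parse one "-numa node,..." option: validate the node ID, the optional
 * HMAT initiator, the CPU list and the mem=/memdev= backing, then record
 * the node in ms->numa_state->nodes[] and bump the node count.
 */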
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
                            Error **errp)
{
    Error *err = NULL;
    uint16_t nodenr;
    uint16List *cpus = NULL;
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    unsigned int max_cpus = ms->smp.max_cpus;
    NodeInfo *numa_info = ms->numa_state->nodes;

    if (node->has_nodeid) {
        nodenr = node->nodeid;
    } else {
        nodenr = ms->numa_state->num_nodes;
    }

    if (nodenr >= MAX_NODES) {
        error_setg(errp, "Max number of NUMA nodes reached: %"
                   PRIu16 "", nodenr);
        return;
    }

    if (numa_info[nodenr].present) {
        error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
        return;
    }

    /*
     * If the initiator is not set, default it to MAX_NODES. If HMAT is
     * enabled and this node has no CPUs, QEMU will raise an error.
     */
    numa_info[nodenr].initiator = MAX_NODES;
    if (node->has_initiator) {
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        if (node->initiator >= MAX_NODES) {
            error_setg(errp, "The initiator id %" PRIu16 " expects an integer "
                       "between 0 and %d", node->initiator,
                       MAX_NODES - 1);
            return;
        }

        numa_info[nodenr].initiator = node->initiator;
    }

    for (cpus = node->cpus; cpus; cpus = cpus->next) {
        CpuInstanceProperties props;
        if (cpus->value >= max_cpus) {
            error_setg(errp,
                       "CPU index (%" PRIu16 ")"
                       " should be smaller than maxcpus (%d)",
                       cpus->value, max_cpus);
            return;
        }
        props = mc->cpu_index_to_instance_props(ms, cpus->value);
        props.node_id = nodenr;
        props.has_node_id = true;
        machine_set_cpu_numa_node(ms, &props, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    have_memdevs = have_memdevs || node->memdev;
    have_mem = have_mem || node->has_mem;
    if ((node->has_mem && have_memdevs) || (node->memdev && have_mem)) {
        error_setg(errp, "numa configuration should use either mem= or memdev=,"
                   " mixing both is not allowed");
        return;
    }

    if (node->has_mem) {
        if (!mc->numa_mem_supported) {
            error_setg(errp, "Parameter -numa node,mem is not supported by this"
                       " machine type");
            error_append_hint(errp, "Use -numa node,memdev instead\n");
            return;
        }

        numa_info[nodenr].node_mem = node->mem;
        if (!qtest_enabled()) {
            warn_report("Parameter -numa node,mem is deprecated,"
                        " use -numa node,memdev instead");
        }
    }
    if (node->memdev) {
        Object *o;
        o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
        if (!o) {
            error_setg(errp, "memdev=%s is ambiguous", node->memdev);
            return;
        }

        object_ref(o);
        numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
        numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
    }

    numa_info[nodenr].present = true;
    max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
    ms->numa_state->num_nodes++;
}

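/*
 * Parse one "-numa dist,src=S,dst=D,val=V" option and store the distance in
 * the source node's distance table; both nodes must already have been
 * declared with "-numa node".
 */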
static
void parse_numa_distance(MachineState *ms, NumaDistOptions *dist, Error **errp)
{
    uint16_t src = dist->src;
    uint16_t dst = dist->dst;
    uint8_t val = dist->val;
    NodeInfo *numa_info = ms->numa_state->nodes;

    if (src >= MAX_NODES || dst >= MAX_NODES) {
        error_setg(errp, "Parameter '%s' expects an integer between 0 and %d",
                   src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1);
        return;
    }

    if (!numa_info[src].present || !numa_info[dst].present) {
        error_setg(errp, "Source/Destination NUMA node is missing. "
                   "Please use '-numa node' option to declare it first.");
        return;
    }

    if (val < NUMA_DISTANCE_MIN) {
        error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
                   "it shouldn't be less than %d.",
                   val, NUMA_DISTANCE_MIN);
        return;
    }

    if (src == dst && val != NUMA_DISTANCE_MIN) {
        error_setg(errp, "Local distance of node %d should be %d.",
                   src, NUMA_DISTANCE_MIN);
        return;
    }

    numa_info[src].distance[dst] = val;
    ms->numa_state->have_numa_distance = true;
}

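/*
 * Parse one "-numa hmat-lb,..." option: validate the initiator/target pair
 * and the latency or bandwidth value, fold it into the common base and range
 * bitmap used later by the ACPI HMAT builder, and append it to the
 * per-(hierarchy, data-type) HMAT_LB_Info list.
 */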
void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node,
                        Error **errp)
{
    int i, first_bit, last_bit;
    uint64_t max_entry, temp_base, bitmap_copy;
    NodeInfo *numa_info = numa_state->nodes;
    HMAT_LB_Info *hmat_lb =
        numa_state->hmat_lb[node->hierarchy][node->data_type];
    HMAT_LB_Data lb_data = {};
    HMAT_LB_Data *lb_temp;

    /* Error checking */
    if (node->initiator >= numa_state->num_nodes) {
        error_setg(errp, "Invalid initiator=%d, it should be less than %d",
                   node->initiator, numa_state->num_nodes);
        return;
    }
    if (node->target >= numa_state->num_nodes) {
        error_setg(errp, "Invalid target=%d, it should be less than %d",
                   node->target, numa_state->num_nodes);
        return;
    }
    if (!numa_info[node->initiator].has_cpu) {
        error_setg(errp, "Invalid initiator=%d, it isn't an "
                   "initiator proximity domain", node->initiator);
        return;
    }
    if (!numa_info[node->target].present) {
        error_setg(errp, "The target=%d should point to an existing node",
                   node->target);
        return;
    }

    if (!hmat_lb) {
        hmat_lb = g_malloc0(sizeof(*hmat_lb));
        numa_state->hmat_lb[node->hierarchy][node->data_type] = hmat_lb;
        hmat_lb->list = g_array_new(false, true, sizeof(HMAT_LB_Data));
    }
    hmat_lb->hierarchy = node->hierarchy;
    hmat_lb->data_type = node->data_type;
    lb_data.initiator = node->initiator;
    lb_data.target = node->target;

    if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) {
        /* Input latency data */

        if (!node->has_latency) {
            error_setg(errp, "Missing 'latency' option");
            return;
        }
        if (node->has_bandwidth) {
            error_setg(errp, "Invalid option 'bandwidth' since "
                       "the data type is latency");
            return;
        }

        /* Detect duplicate configuration */
        for (i = 0; i < hmat_lb->list->len; i++) {
            lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i);

            if (node->initiator == lb_temp->initiator &&
                node->target == lb_temp->target) {
                error_setg(errp, "Duplicate configuration of the latency for "
                           "initiator=%d and target=%d", node->initiator,
                           node->target);
                return;
            }
        }

        hmat_lb->base = hmat_lb->base ? hmat_lb->base : UINT64_MAX;

        if (node->latency) {
            /* Calculate the temporary base and compressed latency */
            max_entry = node->latency;
            temp_base = 1;
            while (QEMU_IS_ALIGNED(max_entry, 10)) {
                max_entry /= 10;
                temp_base *= 10;
            }

            /* Calculate the max compressed latency */
            temp_base = MIN(hmat_lb->base, temp_base);
            max_entry = node->latency / hmat_lb->base;
            max_entry = MAX(hmat_lb->range_bitmap, max_entry);

            /*
             * For latency, hmat_lb->range_bitmap records the max compressed
             * latency, which should be less than 0xFFFF (UINT16_MAX)
             */
            if (max_entry >= UINT16_MAX) {
                error_setg(errp, "Latency %" PRIu64 " between initiator=%d and "
                        "target=%d should not differ from previously entered "
                        "min or max values by more than %d", node->latency,
                        node->initiator, node->target, UINT16_MAX - 1);
                return;
            } else {
                hmat_lb->base = temp_base;
                hmat_lb->range_bitmap = max_entry;
            }

            /*
             * Set bit 0 of lb_info_provided to indicate that latency
             * information is provided
             */
            numa_info[node->target].lb_info_provided |= BIT(0);
        }
        lb_data.data = node->latency;
    } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) {
        /* Input bandwidth data */
        if (!node->has_bandwidth) {
            error_setg(errp, "Missing 'bandwidth' option");
            return;
        }
        if (node->has_latency) {
            error_setg(errp, "Invalid option 'latency' since "
                       "the data type is bandwidth");
            return;
        }
        if (!QEMU_IS_ALIGNED(node->bandwidth, MiB)) {
            error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d and "
                       "target=%d should be 1MB aligned", node->bandwidth,
                       node->initiator, node->target);
            return;
        }

        /* Detect duplicate configuration */
        for (i = 0; i < hmat_lb->list->len; i++) {
            lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i);

            if (node->initiator == lb_temp->initiator &&
                node->target == lb_temp->target) {
                error_setg(errp, "Duplicate configuration of the bandwidth for "
                           "initiator=%d and target=%d", node->initiator,
                           node->target);
                return;
            }
        }

        hmat_lb->base = hmat_lb->base ? hmat_lb->base : 1;

        if (node->bandwidth) {
            /* Keep bitmap unchanged when bandwidth out of range */
            bitmap_copy = hmat_lb->range_bitmap;
            bitmap_copy |= node->bandwidth;
            first_bit = ctz64(bitmap_copy);
            temp_base = UINT64_C(1) << first_bit;
            max_entry = node->bandwidth / temp_base;
            last_bit = 64 - clz64(bitmap_copy);

            /*
             * For bandwidth, first_bit records the base unit of bandwidth
             * bits, last_bit records the last bit of the max bandwidth. The
             * max compressed bandwidth should be less than 0xFFFF (UINT16_MAX)
             */
            if ((last_bit - first_bit) > UINT16_BITS ||
                max_entry >= UINT16_MAX) {
                error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d "
                        "and target=%d should not differ from previously "
                        "entered values by more than %d", node->bandwidth,
                        node->initiator, node->target, UINT16_MAX - 1);
                return;
            } else {
                hmat_lb->base = temp_base;
                hmat_lb->range_bitmap = bitmap_copy;
            }

            /*
             * Set bit 1 of lb_info_provided to indicate that bandwidth
             * information is provided
             */
            numa_info[node->target].lb_info_provided |= BIT(1);
        }
        lb_data.data = node->bandwidth;
    } else {
        assert(0);
    }

    g_array_append_val(hmat_lb->list, lb_data);
}

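/*
 * Parse one "-numa hmat-cache,..." option describing a memory-side cache of
 * a target NUMA node. Latency/bandwidth data for the node must already be
 * present; cache levels must be configured in ascending order, each level
 * strictly larger than the one before it.
 */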
void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node,
                           Error **errp)
{
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;
    NumaHmatCacheOptions *hmat_cache = NULL;

    if (node->node_id >= nb_numa_nodes) {
        error_setg(errp, "Invalid node-id=%" PRIu32 ", it should be less "
                   "than %d", node->node_id, nb_numa_nodes);
        return;
    }

    if (numa_info[node->node_id].lb_info_provided != (BIT(0) | BIT(1))) {
        error_setg(errp, "The latency and bandwidth information of "
                   "node-id=%" PRIu32 " should be provided before memory side "
                   "cache attributes", node->node_id);
        return;
    }

    if (node->level < 1 || node->level >= HMAT_LB_LEVELS) {
        error_setg(errp, "Invalid level=%" PRIu8 ", it should be larger than 0 "
                   "and less than or equal to %d", node->level,
                   HMAT_LB_LEVELS - 1);
        return;
    }

    assert(node->associativity < HMAT_CACHE_ASSOCIATIVITY__MAX);
    assert(node->policy < HMAT_CACHE_WRITE_POLICY__MAX);
    if (ms->numa_state->hmat_cache[node->node_id][node->level]) {
        error_setg(errp, "Duplicate configuration of the side cache for "
                   "node-id=%" PRIu32 " and level=%" PRIu8,
                   node->node_id, node->level);
        return;
    }

    if ((node->level > 1) &&
        ms->numa_state->hmat_cache[node->node_id][node->level - 1] == NULL) {
        error_setg(errp, "Cache level=%u shall be defined first",
                   node->level - 1);
        return;
    }

    if ((node->level > 1) &&
        (node->size <=
            ms->numa_state->hmat_cache[node->node_id][node->level - 1]->size)) {
        error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8
                   " should be larger than the size(%" PRIu64 ") of "
                   "level=%u", node->size, node->level,
                   ms->numa_state->hmat_cache[node->node_id]
                                             [node->level - 1]->size,
                   node->level - 1);
        return;
    }

    if ((node->level < HMAT_LB_LEVELS - 1) &&
        ms->numa_state->hmat_cache[node->node_id][node->level + 1] &&
        (node->size >=
            ms->numa_state->hmat_cache[node->node_id][node->level + 1]->size)) {
        error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8
                   " should be less than the size(%" PRIu64 ") of "
                   "level=%u", node->size, node->level,
                   ms->numa_state->hmat_cache[node->node_id]
                                             [node->level + 1]->size,
                   node->level + 1);
        return;
    }

    hmat_cache = g_malloc0(sizeof(*hmat_cache));
    memcpy(hmat_cache, node, sizeof(*hmat_cache));
    ms->numa_state->hmat_cache[node->node_id][node->level] = hmat_cache;
}

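/*
 * Dispatch one parsed NumaOptions object to the handler matching its "type"
 * (node, dist, cpu, hmat-lb or hmat-cache).
 */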
void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp)
{
    if (!ms->numa_state) {
        error_setg(errp, "NUMA is not supported by this machine-type");
        return;
    }

    switch (object->type) {
    case NUMA_OPTIONS_TYPE_NODE:
        parse_numa_node(ms, &object->u.node, errp);
        break;
    case NUMA_OPTIONS_TYPE_DIST:
        parse_numa_distance(ms, &object->u.dist, errp);
        break;
    case NUMA_OPTIONS_TYPE_CPU:
        if (!object->u.cpu.has_node_id) {
            error_setg(errp, "Missing mandatory node-id property");
            return;
        }
        if (!ms->numa_state->nodes[object->u.cpu.node_id].present) {
            error_setg(errp, "Invalid node-id=%" PRId64 ", NUMA node must be "
                       "defined with -numa node,nodeid=ID before it's used with "
                       "-numa cpu,node-id=ID", object->u.cpu.node_id);
            return;
        }

        machine_set_cpu_numa_node(ms,
                                  qapi_NumaCpuOptions_base(&object->u.cpu),
                                  errp);
        break;
    case NUMA_OPTIONS_TYPE_HMAT_LB:
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        parse_numa_hmat_lb(ms->numa_state, &object->u.hmat_lb, errp);
        break;
    case NUMA_OPTIONS_TYPE_HMAT_CACHE:
        if (!ms->numa_state->hmat_enabled) {
            error_setg(errp, "ACPI Heterogeneous Memory Attribute Table "
                       "(HMAT) is disabled, enable it with -machine hmat=on "
                       "before using any of hmat specific options");
            return;
        }

        parse_numa_hmat_cache(ms, &object->u.hmat_cache, errp);
        break;
    default:
        abort();
    }
}

static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
{
    NumaOptions *object = NULL;
    MachineState *ms = MACHINE(opaque);
    Error *err = NULL;
    Visitor *v = opts_visitor_new(opts);

    visit_type_NumaOptions(v, NULL, &object, errp);
    visit_free(v);
    if (!object) {
        return -1;
    }

    /* Fix up legacy suffix-less format */
    if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) {
        const char *mem_str = qemu_opt_get(opts, "mem");
        qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem);
    }

    set_numa_options(ms, object, &err);

    qapi_free_NumaOptions(object);
    if (err) {
        error_propagate(errp, err);
        return -1;
    }

    return 0;
}

/* If all node pair distances are symmetric, then only distances
 * in one direction are enough. If there is even one asymmetric
 * pair, though, then all distances must be provided. The
 * distance from a node to itself is always NUMA_DISTANCE_MIN,
 * so providing it is never necessary.
 */
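/*
 * For example, on a two-node machine "-numa dist,src=0,dst=1,val=20" alone
 * is sufficient: the 1->0 distance is filled in symmetrically and the local
 * distances default to NUMA_DISTANCE_MIN (10).
 */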
static void validate_numa_distance(MachineState *ms)
{
    int src, dst;
    bool is_asymmetrical = false;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0 &&
                numa_info[dst].distance[src] == 0) {
                if (src != dst) {
                    error_report("The distance between node %d and %d is "
                                 "missing, at least one distance value "
                                 "between each pair of nodes should be "
                                 "provided.", src, dst);
                    exit(EXIT_FAILURE);
                }
            }

            if (numa_info[src].distance[dst] != 0 &&
                numa_info[dst].distance[src] != 0 &&
                numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                is_asymmetrical = true;
            }
        }
    }

    if (is_asymmetrical) {
        for (src = 0; src < nb_numa_nodes; src++) {
            for (dst = 0; dst < nb_numa_nodes; dst++) {
                if (src != dst && numa_info[src].distance[dst] == 0) {
                    error_report("At least one asymmetrical pair of "
                                 "distances is given, please provide distances "
                                 "for both directions of all node pairs.");
                    exit(EXIT_FAILURE);
                }
            }
        }
    }
}

static void complete_init_numa_distance(MachineState *ms)
{
    int src, dst;
    NodeInfo *numa_info = ms->numa_state->nodes;

    /* Fix up the NUMA distance table using the symmetric policy. If the
     * table is asymmetric, it must already be complete, so the only entries
     * left to fill are the local distances and the symmetric counterparts of
     * entries given in one direction only; this is guaranteed by
     * validate_numa_distance() above.
     */
    for (src = 0; src < ms->numa_state->num_nodes; src++) {
        for (dst = 0; dst < ms->numa_state->num_nodes; dst++) {
            if (numa_info[src].distance[dst] == 0) {
                if (src == dst) {
                    numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
                } else {
                    numa_info[src].distance[dst] = numa_info[dst].distance[src];
                }
            }
        }
    }
}

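/*
 * Assemble guest RAM from the per-node memory backends: map each node's
 * memdev region into the "ram" container at consecutive offsets, in node
 * order.
 */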
static void numa_init_memdev_container(MachineState *ms, MemoryRegion *ram)
{
    int i;
    uint64_t addr = 0;

    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        uint64_t size = ms->numa_state->nodes[i].node_mem;
        HostMemoryBackend *backend = ms->numa_state->nodes[i].node_memdev;
        if (!backend) {
            continue;
        }
        MemoryRegion *seg = machine_consume_memdev(ms, backend);
        memory_region_add_subregion(ram, addr, seg);
        addr += size;
    }
}

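/*
 * Called once all -numa options have been parsed: auto-enable NUMA where the
 * machine requires it, reject sparse node IDs, check that per-node memory
 * adds up to the machine RAM size, build the memdev-backed RAM container and
 * complete the distance table.
 */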
void numa_complete_configuration(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    NodeInfo *numa_info = ms->numa_state->nodes;

    /*
     * If memory hotplug is enabled (slots > 0) or memory devices are enabled
     * (ms->maxram_size > ms->ram_size) but no '-numa' options were given
     * explicitly on the CLI, guests will break.
     *
     * Windows: won't enable memory hotplug without SRAT table at all
     *
     * Linux: if QEMU is started with initial memory all below 4Gb
     * and no SRAT table present, guest kernel will use nommu DMA ops,
     * which breaks 32bit hw drivers when memory is hotplugged and
     * guest tries to use it with those drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     *
     * Or if MachineClass::auto_enable_numa is true and no NUMA nodes,
     * assume there is just one node with whole RAM.
     */
    if (ms->numa_state->num_nodes == 0 &&
        ((ms->ram_slots && mc->auto_enable_numa_with_memhp) ||
         (ms->maxram_size > ms->ram_size && mc->auto_enable_numa_with_memdev) ||
         mc->auto_enable_numa)) {
        NumaNodeOptions node = { };
        parse_numa_node(ms, &node, &error_abort);
        numa_info[0].node_mem = ms->ram_size;
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(ms->numa_state->num_nodes == max_numa_nodeid);

    if (ms->numa_state->num_nodes > 0) {
        uint64_t numa_total;

        numa_total = 0;
        for (i = 0; i < ms->numa_state->num_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ms->ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ms->ram_size);
            exit(1);
        }

        if (!numa_uses_legacy_mem() && mc->default_ram_id) {
            if (ms->memdev) {
                error_report("'-machine memory-backend' and '-numa memdev'"
                             " properties are mutually exclusive");
                exit(1);
            }
            ms->ram = g_new(MemoryRegion, 1);
            memory_region_init(ms->ram, OBJECT(ms), mc->default_ram_id,
                               ms->ram_size);
            numa_init_memdev_container(ms, ms->ram);
        }
        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the distances of two nodes of a pair differ, i.e. distance
         * A->B != distance B->A, then that means the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (ms->numa_state->have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance(ms);

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance(ms);
        }
    }
}

void parse_numa_opts(MachineState *ms)
{
    qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal);
}

void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
{
    int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);

    if (node_id == CPU_UNSET_NUMA_NODE_ID) {
        /* due to a bug in libvirt, it doesn't pass node-id from props on
         * device_add as expected, so we have to fix it up here */
        if (slot->props.has_node_id) {
            object_property_set_int(OBJECT(dev), "node-id",
                                    slot->props.node_id, errp);
        }
    } else if (node_id != slot->props.node_id) {
        error_setg(errp, "invalid node-id, must be %"PRId64,
                   slot->props.node_id);
    }
}

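/*
 * Account memory plugged via memory devices (DIMM, NVDIMM, virtio-pmem,
 * virtio-mem, SGX EPC) into the per-node totals.
 */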
static void numa_stat_memory_devices(NumaNodeMem node_mem[])
{
    MemoryDeviceInfoList *info_list = qmp_memory_device_list();
    MemoryDeviceInfoList *info;
    PCDIMMDeviceInfo *pcdimm_info;
    VirtioPMEMDeviceInfo *vpi;
    VirtioMEMDeviceInfo *vmi;
    SgxEPCDeviceInfo *se;

    for (info = info_list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value) {
            switch (value->type) {
            case MEMORY_DEVICE_INFO_KIND_DIMM:
            case MEMORY_DEVICE_INFO_KIND_NVDIMM:
                pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ?
                              value->u.dimm.data : value->u.nvdimm.data;
                node_mem[pcdimm_info->node].node_mem += pcdimm_info->size;
                node_mem[pcdimm_info->node].node_plugged_mem +=
                    pcdimm_info->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM:
                vpi = value->u.virtio_pmem.data;
                /* TODO: once we support numa, assign to right node */
                node_mem[0].node_mem += vpi->size;
                node_mem[0].node_plugged_mem += vpi->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_VIRTIO_MEM:
                vmi = value->u.virtio_mem.data;
                node_mem[vmi->node].node_mem += vmi->size;
                node_mem[vmi->node].node_plugged_mem += vmi->size;
                break;
            case MEMORY_DEVICE_INFO_KIND_SGX_EPC:
                se = value->u.sgx_epc.data;
                node_mem[se->node].node_mem += se->size;
                node_mem[se->node].node_plugged_mem = 0;
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    qapi_free_MemoryDeviceInfoList(info_list);
}

void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms)
{
    int i;

    if (ms->numa_state == NULL || ms->numa_state->num_nodes <= 0) {
        return;
    }

    numa_stat_memory_devices(node_mem);
    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        node_mem[i].node_mem += ms->numa_state->nodes[i].node_mem;
    }
}

static int ram_block_notify_add_single(RAMBlock *rb, void *opaque)
{
    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
    const ram_addr_t size = qemu_ram_get_used_length(rb);
    void *host = qemu_ram_get_host_addr(rb);
    RAMBlockNotifier *notifier = opaque;

    if (host) {
        notifier->ram_block_added(notifier, host, size, max_size);
    }
    return 0;
}

static int ram_block_notify_remove_single(RAMBlock *rb, void *opaque)
{
    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
    const ram_addr_t size = qemu_ram_get_used_length(rb);
    void *host = qemu_ram_get_host_addr(rb);
    RAMBlockNotifier *notifier = opaque;

    if (host) {
        notifier->ram_block_removed(notifier, host, size, max_size);
    }
    return 0;
}

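/*
 * RAM block notifier registration: adding a notifier replays ram_block_added
 * for every RAM block that already exists, and removing it replays
 * ram_block_removed, so listeners see a consistent view regardless of when
 * they attach or detach.
 */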
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);

    /* Notify about all existing ram blocks. */
    if (n->ram_block_added) {
        qemu_ram_foreach_block(ram_block_notify_add_single, n);
    }
}

void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);

    if (n->ram_block_removed) {
        qemu_ram_foreach_block(ram_block_notify_remove_single, n);
    }
}

void ram_block_notify_add(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_added) {
            notifier->ram_block_added(notifier, host, size, max_size);
        }
    }
}

void ram_block_notify_remove(void *host, size_t size, size_t max_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_removed) {
            notifier->ram_block_removed(notifier, host, size, max_size);
        }
    }
}

void ram_block_notify_resize(void *host, size_t old_size, size_t new_size)
{
    RAMBlockNotifier *notifier;
    RAMBlockNotifier *next;

    QLIST_FOREACH_SAFE(notifier, &ram_list.ramblock_notifiers, next, next) {
        if (notifier->ram_block_resized) {
            notifier->ram_block_resized(notifier, host, old_size, new_size);
        }
    }
}