#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr_ovec.h"
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"

static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}

static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}

static bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    DeviceMemoryState *dms = machine->device_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= dms->base)
        && ((addr - dms->base) < memory_region_size(&dms->mr))) {
        return true;
    }

    return false;
}

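/*
 * H_ENTER: insert a new entry into the guest's hashed page table.  args[]
 * carries flags, the PTE index (group base plus a slot hint), and the two
 * PTE doublewords.  Without H_EXACT the first free slot in the PTE group
 * is used; the index actually used is returned in args[0].
 */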
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

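/*
 * Common backend for H_REMOVE and H_BULK_REMOVE: invalidate one HPTE,
 * subject to the AVPN and ANDCOND match conditions in flags, and hand the
 * old PTE doublewords back through vp/rp so the caller can report them.
 */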
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4

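/*
 * H_BULK_REMOVE: process up to four translation specifier pairs from the
 * argument buffer.  Each request is handed to remove_hpte() and its result
 * code is folded back into the high bits of the first doubleword of the
 * pair, so the guest sees per-entry status.
 */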
static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

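/*
 * H_PROTECT: rewrite the protection, key and no-execute bits of an existing
 * HPTE from the low bits of flags, temporarily invalidating the entry and
 * flushing the TLB before the updated entry is written back.
 */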
static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

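/*
 * H_READ: return the raw contents of one HPTE, or, with H_READ_4, of the
 * four entries starting at the index rounded down to a multiple of four.
 */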
static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}

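/*
 * State for an in-progress HPT resize.  The candidate hash table is
 * allocated and zeroed in a detached worker thread (hpt_prepare_thread)
 * while the guest polls H_RESIZE_HPT_PREPARE; "complete" and the result
 * fields only become stable once the thread has re-taken the BQL.
 */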
struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}

static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_try_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}

/* Convert a return code from the KVM ioctl()s implementing resize HPT
 * into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
{
    if (ret >= 100000) {
        return H_LONG_BUSY_ORDER_100_SEC;
    } else if (ret >= 10000) {
        return H_LONG_BUSY_ORDER_10_SEC;
    } else if (ret >= 1000) {
        return H_LONG_BUSY_ORDER_1_SEC;
    } else if (ret >= 100) {
        return H_LONG_BUSY_ORDER_100_MSEC;
    } else if (ret >= 10) {
        return H_LONG_BUSY_ORDER_10_MSEC;
    } else if (ret > 0) {
        return H_LONG_BUSY_ORDER_1_MSEC;
    }

    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EPERM:
        return H_AUTHORITY;
    case -EINVAL:
        return H_PARAMETER;
    case -ENXIO:
        return H_CLOSED;
    case -ENOSPC:
        return H_PTEG_FULL;
    case -EBUSY:
        return H_BUSY;
    case -ENOMEM:
        return H_NO_MEM;
    default:
        return H_HARDWARE;
    }
}

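/*
 * H_RESIZE_HPT_PREPARE: start, poll or cancel allocation of a candidate
 * HPT of 2^shift bytes.  If KVM implements the resize ioctls the work is
 * delegated to the kernel; otherwise the table is allocated in a worker
 * thread and the guest is told to retry with H_LONG_BUSY until it is ready.
 */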
static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    target_ulong flags = args[0];
    int shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    uint64_t current_ram_size;
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_prepare(flags, shift);

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (shift && ((shift < 18) || (shift > 46))) {
        return H_PARAMETER;
    }

    current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();

    /* We only allow the guest to allocate an HPT one order above what
     * we'd normally give them (to stop a small guest claiming a huge
     * chunk of resources in the HPT */
    if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) {
        return H_RESOURCE;
    }

    rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
    if (rc != -ENOSYS) {
        return resize_hpt_convert_rc(rc);
    }

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;
    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}

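/*
 * Recompute the hash of a single bolted HPTE for the new table size and
 * copy it into the candidate HPT.  Invalid and non-bolted entries are
 * simply dropped; a collision between two bolted entries makes the resize
 * fail with H_PTEG_FULL.
 */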
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}

static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
{
    int ret;

    cpu_synchronize_state(cs);

    ret = kvmppc_put_books_sregs(POWERPC_CPU(cs));
    if (ret < 0) {
        error_report("failed to push sregs to KVM: %s", strerror(-ret));
        exit(1);
    }
}

static void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
    CPUState *cs;

    /*
     * This is a hack for the benefit of KVM PR - it abuses the SDR1
     * slot in kvm_sregs to communicate the userspace address of the
     * HPT
     */
    if (!kvm_enabled() || !spapr->htab) {
        return;
    }

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL);
    }
}

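/*
 * H_RESIZE_HPT_COMMIT: switch to the HPT prepared by an earlier
 * H_RESIZE_HPT_PREPARE, rehashing the bolted entries of the current table
 * into it.  As with PREPARE, KVM gets first refusal via its resize ioctl.
 */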
static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_commit(flags, shift);

    rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
    if (rc != -ENOSYS) {
        rc = resize_hpt_convert_rc(rc);
        if (rc == H_SUCCESS) {
            /* Need to set the new htab_shift in the machine state */
            spapr->htab_shift = shift;
        }
        return rc;
    }

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

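/*
 * H_PAGE_INIT: zero and/or copy one page of guest memory, optionally
 * synchronising or invalidating the instruction cache for the destination
 * page (via KVM helpers, or a TB flush under TCG).
 */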
static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, true);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, false);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len); /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

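/*
 * H_REGISTER_VPA helpers: the guest registers (or deregisters) per-vCPU
 * shared-memory areas - the Virtual Processor Area itself, the SLB shadow
 * buffer and the dispatch trace log.  The SLB shadow and DTL can only be
 * registered while a VPA exists, hence the ordering checks below.
 */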
static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    spapr_cpu->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (spapr_cpu->dtl_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->slb_shadow_addr = addr;
    spapr_cpu->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->dtl_addr = addr;
    spapr_cpu->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    PowerPCCPU *tcpu;

    tcpu = spapr_find_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tcpu, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tcpu, vpa);
        break;
    }

    return ret;
}

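/*
 * H_CEDE: re-enable external interrupts and, unless this vCPU has already
 * been prodded with H_PROD, halt it until the next interrupt arrives.
 */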
static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }

    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }

    return H_SUCCESS;
}

/*
 * Confer to self, aka join.  Cede could use the same pattern as well, if
 * EXCP_HLT can be changed to EXCP_HALTED.
 */
static target_ulong h_confer_self(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }
    cs->halted = 1;
    cs->exception_index = EXCP_HALTED;
    cs->exit_request = 1;

    return H_SUCCESS;
}

static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs;
    bool last_unjoined = true;

    if (env->msr & (1ULL << MSR_EE)) {
        return H_BAD_MODE;
    }

    /*
     * Must not join the last CPU running. Interestingly, no such restriction
     * for H_CONFER-to-self, but that is probably not intended to be used
     * when H_JOIN is available.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;
        if (c == cpu) {
            continue;
        }

        /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            last_unjoined = false;
            break;
        }
    }
    if (last_unjoined) {
        return H_CONTINUE;
    }

    return h_confer_self(cpu);
}

static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    uint32_t dispatch = args[1];
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu;

    /*
     * -1 means confer to all other CPUs without dispatch counter check,
     * otherwise it's a targeted confer.
     */
    if (target != -1) {
        PowerPCCPU *target_cpu = spapr_find_cpu(target);
        uint32_t target_dispatch;

        if (!target_cpu) {
            return H_PARAMETER;
        }

        /*
         * target == self is a special case, we wait until prodded, without
         * dispatch counter check.
         */
        if (cpu == target_cpu) {
            return h_confer_self(cpu);
        }

        spapr_cpu = spapr_cpu_state(target_cpu);
        if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) {
            return H_SUCCESS;
        }

        target_dispatch = ldl_be_phys(cs->as,
                                  spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        if (target_dispatch != dispatch) {
            return H_SUCCESS;
        }

        /*
         * The targeted confer does not do anything special beyond yielding
         * the current vCPU, but even this should be better than nothing.
         * At least for single-threaded tcg, it gives the target a chance to
         * run before we run again. Multi-threaded tcg does not really do
         * anything with EXCP_YIELD yet.
         */
    }

    cs->exception_index = EXCP_YIELD;
    cs->exit_request = 1;
    cpu_loop_exit(cs);

    return H_SUCCESS;
}

static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    PowerPCCPU *tcpu;
    CPUState *cs;
    SpaprCpuState *spapr_cpu;

    tcpu = spapr_find_cpu(target);
    cs = CPU(tcpu);
    if (!cs) {
        return H_PARAMETER;
    }

    spapr_cpu = spapr_cpu_state(tcpu);
    spapr_cpu->prod = true;
    cs->halted = 0;
    qemu_cpu_kick(cs);

    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

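/*
 * H_LOGICAL_MEMOP: copy or bit-invert 'count' elements of size 2^esize
 * between guest physical addresses, walking backwards when the regions
 * overlap so the result matches a simple element-by-element copy.
 */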
static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        spapr_set_all_lpcrs(0, LPCR_ILE);
        spapr_pci_switch_vga(spapr, true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
        spapr_pci_switch_vga(spapr, false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == 1) {
        /* AIL=1 is reserved in POWER8/POWER9/POWER10 */
        return H_UNSUPPORTED_FLAG;
    }

    if (mflags == 2 && (pcc->insns_flags2 & PPC2_ISA310)) {
        /* AIL=2 is reserved in POWER10 (ISA v3.1) */
        return H_UNSUPPORTED_FLAG;
    }

    spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, spapr, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_CLEAN_SLB)");
    return H_FUNCTION;
}

static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_INVALIDATE_PID)");
    return H_FUNCTION;
}

David Gibsonce2918c2019-03-06 15:35:37 +11001447static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001448 uint64_t patbe_old, uint64_t patbe_new)
1449{
1450 /*
1451 * We have 4 Options:
1452 * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
1453 * HASH->RADIX : Free HPT
1454 * RADIX->HASH : Allocate HPT
1455 * NOTHING->HASH : Allocate HPT
 1456 * Note: NOTHING is the case where we told the guest it could choose
 1457 * later, so we assumed radix, and now it has called H_REG_PROC_TBL.
1458 */
1459
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001460 if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) {
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001461 /* We assume RADIX, so this catches all the "Do Nothing" cases */
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001462 } else if (!(patbe_old & PATE1_GR)) {
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001463 /* HASH->RADIX : Free HPT */
Bharata B Rao06ec79e2017-05-17 09:19:20 +05301464 spapr_free_hpt(spapr);
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001465 } else if (!(patbe_new & PATE1_GR)) {
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001466 /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
David Gibson8897ea52019-11-28 16:37:04 +11001467 spapr_setup_hpt(spapr);
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001468 }
1470}
1471
1472#define FLAGS_MASK 0x01FULL
1473#define FLAG_MODIFY 0x10
1474#define FLAG_REGISTER 0x08
1475#define FLAG_RADIX 0x04
1476#define FLAG_HASH_PROC_TBL 0x02
1477#define FLAG_GTSE 0x01
1478
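/*
 * H_REGISTER_PROC_TBL: args[] carries flags, the process table address,
 * page size and table size.  Depending on the flag bits this registers
 * a radix or hash process table (or deregisters / revalidates the
 * current one), allocates or frees the HPT as needed, updates the
 * LPCR UPRT/HR/GTSE bits on all vCPUs and, under KVM, pushes the new
 * MMU configuration to the kernel.
 */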
Suraj Jitindar Singhd77a98b2017-03-20 10:46:45 +11001479static target_ulong h_register_process_table(PowerPCCPU *cpu,
David Gibsonce2918c2019-03-06 15:35:37 +11001480 SpaprMachineState *spapr,
Suraj Jitindar Singhd77a98b2017-03-20 10:46:45 +11001481 target_ulong opcode,
1482 target_ulong *args)
1483{
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001484 target_ulong flags = args[0];
1485 target_ulong proc_tbl = args[1];
1486 target_ulong page_size = args[2];
1487 target_ulong table_size = args[3];
Suraj Jitindar Singh176dcce2019-03-05 13:21:02 +11001488 target_ulong update_lpcr = 0;
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001489 uint64_t cproc;
1490
1491 if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
1492 return H_PARAMETER;
1493 }
1494 if (flags & FLAG_MODIFY) {
1495 if (flags & FLAG_REGISTER) {
1496 if (flags & FLAG_RADIX) { /* Register new RADIX process table */
1497 if (proc_tbl & 0xfff || proc_tbl >> 60) {
1498 return H_P2;
1499 } else if (page_size) {
1500 return H_P3;
1501 } else if (table_size > 24) {
1502 return H_P4;
1503 }
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001504 cproc = PATE1_GR | proc_tbl | table_size;
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001505 } else { /* Register new HPT process table */
1506 if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
1507 /* TODO - Not Supported */
1508 /* Technically caused by flag bits => H_PARAMETER */
1509 return H_PARAMETER;
1510 } else { /* Hash with SLB */
1511 if (proc_tbl >> 38) {
1512 return H_P2;
1513 } else if (page_size & ~0x7) {
1514 return H_P3;
1515 } else if (table_size > 24) {
1516 return H_P4;
1517 }
1518 }
1519 cproc = (proc_tbl << 25) | page_size << 5 | table_size;
1520 }
1521
1522 } else { /* Deregister current process table */
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001523 /*
1524 * Set to benign value: (current GR) | 0. This allows
1525 * deregistration in KVM to succeed even if the radix bit
1526 * in flags doesn't match the radix bit in the old PATE.
1527 */
1528 cproc = spapr->patb_entry & PATE1_GR;
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001529 }
1530 } else { /* Maintain current registration */
Benjamin Herrenschmidt79825f42019-02-15 18:00:27 +01001531 if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) {
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001532 /* Technically caused by flag bits => H_PARAMETER */
1533 return H_PARAMETER; /* Existing Process Table Mismatch */
1534 }
1535 cproc = spapr->patb_entry;
1536 }
1537
1538 /* Check if we need to setup OR free the hpt */
1539 spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);
1540
1541 spapr->patb_entry = cproc; /* Save new process table */
Suraj Jitindar Singh6de83302017-05-02 16:37:14 +10001542
Benjamin Herrenschmidt00fd0752019-02-15 18:00:18 +01001543 /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */
Suraj Jitindar Singh176dcce2019-03-05 13:21:02 +11001544 if (flags & FLAG_RADIX) { /* Radix must use process tables, also set HR */
 1545 update_lpcr |= (LPCR_UPRT | LPCR_HR);
 1546 } else if (flags & FLAG_HASH_PROC_TBL) { /* Hash with process tables */
 1547 update_lpcr |= LPCR_UPRT;
 }
 1548 if (flags & FLAG_GTSE) { /* Guest translation shootdown enable */
David Gibson49e9fdd2019-03-13 14:17:27 +11001549 update_lpcr |= LPCR_GTSE;
 }
1550
Suraj Jitindar Singh176dcce2019-03-05 13:21:02 +11001551 spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE);
Suraj Jitindar Singhb4db5412017-03-20 10:46:46 +11001552
1553 if (kvm_enabled()) {
1554 return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
1555 flags & FLAG_GTSE, cproc);
1556 }
1557 return H_SUCCESS;
Suraj Jitindar Singhd77a98b2017-03-20 10:46:45 +11001558}
1559
Nicholas Piggin1c7ad772016-12-05 16:50:21 +11001560#define H_SIGNAL_SYS_RESET_ALL -1
1561#define H_SIGNAL_SYS_RESET_ALLBUTSELF -2
1562
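/*
 * H_SIGNAL_SYS_RESET: a target of -1 raises a system reset on every
 * vCPU, -2 on every vCPU except the caller, and a non-negative value
 * on the single vCPU with that id (H_PARAMETER if it doesn't exist).
 */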
1563static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
David Gibsonce2918c2019-03-06 15:35:37 +11001564 SpaprMachineState *spapr,
Nicholas Piggin1c7ad772016-12-05 16:50:21 +11001565 target_ulong opcode, target_ulong *args)
1566{
1567 target_long target = args[0];
1568 CPUState *cs;
1569
1570 if (target < 0) {
1571 /* Broadcast */
1572 if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
1573 return H_PARAMETER;
1574 }
1575
1576 CPU_FOREACH(cs) {
1577 PowerPCCPU *c = POWERPC_CPU(cs);
1578
1579 if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
1580 if (c == cpu) {
1581 continue;
1582 }
1583 }
1584 run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
1585 }
1586 return H_SUCCESS;
1587
1588 } else {
1589 /* Unicast */
Sam Bobroff2e886fb2017-08-09 15:38:56 +10001590 cs = CPU(spapr_find_cpu(target));
Sam Bobrofff57467e2017-08-03 16:28:27 +10001591 if (cs) {
1592 run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
1593 return H_SUCCESS;
Nicholas Piggin1c7ad772016-12-05 16:50:21 +11001594 }
1595 return H_PARAMETER;
1596 }
1597}
1598
Greg Kurz121afbe2020-09-14 14:34:55 +02001599/* Returns either a logical PVR or zero if none was found */
1600static uint32_t cas_check_pvr(PowerPCCPU *cpu, uint32_t max_compat,
1601 target_ulong *addr, bool *raw_mode_supported)
Alexey Kardashevskiy2a6593c2014-05-23 12:26:54 +10001602{
David Gibson152ef802016-11-16 13:54:48 +11001603 bool explicit_match = false; /* Matched the CPU's real PVR */
David Gibson152ef802016-11-16 13:54:48 +11001604 uint32_t best_compat = 0;
1605 int i;
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001606
David Gibson152ef802016-11-16 13:54:48 +11001607 /*
1608 * We scan the supplied table of PVRs looking for two things
1609 * 1. Is our real CPU PVR in the list?
1610 * 2. What's the "best" listed logical PVR
1611 */
1612 for (i = 0; i < 512; ++i) {
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001613 uint32_t pvr, pvr_mask;
1614
David Gibson80c33d32017-05-18 14:47:44 +10001615 pvr_mask = ldl_be_phys(&address_space_memory, *addr);
1616 pvr = ldl_be_phys(&address_space_memory, *addr + 4);
1617 *addr += 8;
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001618
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001619 if (~pvr_mask & pvr) {
David Gibson152ef802016-11-16 13:54:48 +11001620 break; /* Terminator record */
1621 }
1622
1623 if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
1624 explicit_match = true;
1625 } else {
1626 if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
1627 best_compat = pvr;
1628 }
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001629 }
1630 }
1631
Greg Kurzcc7b35b2017-08-17 13:23:50 +02001632 *raw_mode_supported = explicit_match;
1633
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001634 /* Parsing finished */
David Gibson152ef802016-11-16 13:54:48 +11001635 trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001636
David Gibson80c33d32017-05-18 14:47:44 +10001637 return best_compat;
1638}
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001639
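/*
 * Core of ibm,client-architecture-support: negotiates a compat PVR,
 * parses the guest's option vectors (MMU mode, XIVE/XICS interrupt
 * mode, HPT resizing, ...), applies the negotiated configuration to
 * the machine and rebuilds the device tree to be returned to the
 * guest.
 */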
Daniel Henrique Barbozaeb72b632021-01-14 15:06:23 -03001640static
Alexey Kardashevskiy91067db2020-03-25 16:25:42 +01001641target_ulong do_client_architecture_support(PowerPCCPU *cpu,
1642 SpaprMachineState *spapr,
1643 target_ulong vec,
1644 target_ulong fdt_bufsize)
David Gibson80c33d32017-05-18 14:47:44 +10001645{
Alexey Kardashevskiy91067db2020-03-25 16:25:42 +01001646 target_ulong ov_table; /* Working address in data buffer */
David Gibson80c33d32017-05-18 14:47:44 +10001647 uint32_t cas_pvr;
Greg Kurz86962462020-03-25 16:25:30 +01001648 SpaprOptionVector *ov1_guest, *ov5_guest;
David Gibson80c33d32017-05-18 14:47:44 +10001649 bool guest_radix;
Greg Kurzcc7b35b2017-08-17 13:23:50 +02001650 bool raw_mode_supported = false;
Greg Kurze7f78db2019-05-15 19:04:24 +02001651 bool guest_xive;
Greg Kurz12b38682020-01-22 14:11:12 +01001652 CPUState *cs;
Greg Kurz087820e2020-03-25 16:25:49 +01001653 void *fdt;
Greg Kurz121afbe2020-09-14 14:34:55 +02001654 uint32_t max_compat = spapr->max_compat_pvr;
Greg Kurz12b38682020-01-22 14:11:12 +01001655
1656 /* CAS is supposed to be called early when only the boot vCPU is active. */
1657 CPU_FOREACH(cs) {
1658 if (cs == CPU(cpu)) {
1659 continue;
1660 }
1661 if (!cs->halted) {
1662 warn_report("guest has multiple active vCPUs at CAS, which is not allowed");
1663 return H_MULTI_THREADS_ACTIVE;
1664 }
1665 }
David Gibson80c33d32017-05-18 14:47:44 +10001666
Greg Kurz121afbe2020-09-14 14:34:55 +02001667 cas_pvr = cas_check_pvr(cpu, max_compat, &vec, &raw_mode_supported);
1668 if (!cas_pvr && (!raw_mode_supported || max_compat)) {
1669 /*
1670 * We couldn't find a suitable compatibility mode, and either
1671 * the guest doesn't support "raw" mode for this CPU, or "raw"
1672 * mode is disabled because a maximum compat mode is set.
1673 */
1674 error_report("Couldn't negotiate a suitable PVR during CAS");
David Gibson80c33d32017-05-18 14:47:44 +10001675 return H_HARDWARE;
1676 }
1677
1678 /* Update CPUs */
1679 if (cpu->compat_pvr != cas_pvr) {
Greg Kurz7e92da82020-09-14 14:34:56 +02001680 Error *local_err = NULL;
1681
1682 if (ppc_set_compat_all(cas_pvr, &local_err) < 0) {
Greg Kurzcc7b35b2017-08-17 13:23:50 +02001683 /* We failed to set compat mode (likely because running with KVM PR),
 1684 * but maybe we can fall back to raw mode if the guest supports it.
 1685 */
1686 if (!raw_mode_supported) {
1687 error_report_err(local_err);
1688 return H_HARDWARE;
1689 }
Greg Kurz2c9dfda2018-06-12 19:01:26 +02001690 error_free(local_err);
Alexey Kardashevskiy3794d542014-05-23 12:26:57 +10001691 }
1692 }
1693
Bharata B Rao03d196b2015-07-13 10:34:00 +10001694 /* For the future use: here @ov_table points to the first option vector */
Alexey Kardashevskiy91067db2020-03-25 16:25:42 +01001695 ov_table = vec;
Bharata B Rao03d196b2015-07-13 10:34:00 +10001696
Sam Bobroffe957f6a2017-03-20 10:46:49 +11001697 ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
Greg Kurzcbd0d7f2020-01-17 10:15:52 +01001698 if (!ov1_guest) {
1699 warn_report("guest didn't provide option vector 1");
1700 return H_PARAMETER;
1701 }
Michael Rothfacdb8b2016-10-24 23:47:28 -05001702 ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
Greg Kurzcbd0d7f2020-01-17 10:15:52 +01001703 if (!ov5_guest) {
Greg Kurzce05fa02020-03-21 18:34:22 +01001704 spapr_ovec_cleanup(ov1_guest);
Greg Kurzcbd0d7f2020-01-17 10:15:52 +01001705 warn_report("guest didn't provide option vector 5");
1706 return H_PARAMETER;
1707 }
Sam Bobroff9fb45412017-03-23 14:46:00 +11001708 if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
1709 error_report("guest requested hash and radix MMU, which is invalid.");
1710 exit(EXIT_FAILURE);
1711 }
Greg Kurze7f78db2019-05-15 19:04:24 +02001712 if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) {
1713 error_report("guest requested an invalid interrupt mode");
1714 exit(EXIT_FAILURE);
1715 }
1716
Sam Bobroff9fb45412017-03-23 14:46:00 +11001717 guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);
Alexey Kardashevskiy2a6593c2014-05-23 12:26:54 +10001718
Greg Kurze7f78db2019-05-15 19:04:24 +02001719 guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT);
1720
David Gibson2772cf62017-07-12 17:56:06 +10001721 /*
1722 * HPT resizing is a bit of a special case, because when enabled
1723 * we assume an HPT guest will support it until it says it
1724 * doesn't, instead of assuming it won't support it until it says
1725 * it does. Strictly speaking that approach could break for
1726 * guests which don't make a CAS call, but those are so old we
1727 * don't care about them. Without that assumption we'd have to
1728 * make at least a temporary allocation of an HPT sized for max
1729 * memory, which could be impossibly difficult under KVM HV if
1730 * maxram is large.
1731 */
1732 if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) {
1733 int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
1734
1735 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) {
1736 error_report(
1737 "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required");
1738 exit(1);
1739 }
1740
1741 if (spapr->htab_shift < maxshift) {
1742 /* Guest doesn't know about HPT resizing, so we
1743 * pre-emptively resize for the maximum permitted RAM. At
1744 * the point this is called, nothing should have been
1745 * entered into the existing HPT */
1746 spapr_reallocate_hpt(spapr, maxshift, &error_fatal);
Greg Kurz1ec26c72017-09-25 13:00:02 +02001747 push_sregs_to_kvm_pr(spapr);
David Gibson2772cf62017-07-12 17:56:06 +10001748 }
1749 }
1750
Michael Rothfacdb8b2016-10-24 23:47:28 -05001751 /* NOTE: there are actually a number of ov5 bits where input from the
1752 * guest is always zero, and the platform/QEMU enables them independently
1753 * of guest input. To model these properly we'd want some sort of mask,
1754 * but since they only currently apply to memory migration as defined
1755 * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
Michael Roth6787d272016-10-24 23:47:29 -05001756 * to worry about this for now.
Michael Rothfacdb8b2016-10-24 23:47:28 -05001757 */
Cédric Le Goater30bf9ed2017-09-08 16:33:43 +02001758
Michael Roth6787d272016-10-24 23:47:29 -05001759 /* full range of negotiated ov5 capabilities */
Michael Rothfacdb8b2016-10-24 23:47:28 -05001760 spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
1761 spapr_ovec_cleanup(ov5_guest);
Greg Kurzb4b83312020-03-25 16:25:36 +01001762
Sam Bobroff9fb45412017-03-23 14:46:00 +11001763 if (guest_radix) {
1764 if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1765 error_report("Guest requested unavailable MMU mode (radix).");
1766 exit(EXIT_FAILURE);
1767 }
Sam Bobroff9fb45412017-03-23 14:46:00 +11001768 } else {
1769 if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1770 && !kvmppc_has_cap_mmu_hash_v3()) {
1771 error_report("Guest requested unavailable MMU mode (hash).");
1772 exit(EXIT_FAILURE);
1773 }
1774 }
David Gibsondaa36372019-08-28 13:59:27 +10001775 spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00);
Shivaprasad G Bhat00005f22019-07-17 03:20:31 -05001776 spapr_ovec_cleanup(ov1_guest);
Cédric Le Goater13db0cd2019-01-02 06:57:42 +01001777
1778 /*
David Gibson8deb8012019-10-18 15:19:31 +11001779 * Ensure the guest asks for an interrupt mode we support;
1780 * otherwise terminate the boot.
Greg Kurze7f78db2019-05-15 19:04:24 +02001781 */
1782 if (guest_xive) {
David Gibsonca628232019-09-25 15:12:07 +10001783 if (!spapr->irq->xive) {
Greg Kurz75de5942019-05-16 09:36:57 +02001784 error_report(
1785"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property");
Greg Kurze7f78db2019-05-15 19:04:24 +02001786 exit(EXIT_FAILURE);
1787 }
1788 } else {
David Gibsonca628232019-09-25 15:12:07 +10001789 if (!spapr->irq->xics) {
Greg Kurz75de5942019-05-16 09:36:57 +02001790 error_report(
1791"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual");
Greg Kurze7f78db2019-05-15 19:04:24 +02001792 exit(EXIT_FAILURE);
1793 }
1794 }
1795
David Gibson8deb8012019-10-18 15:19:31 +11001796 spapr_irq_update_active_intc(spapr);
1797
Greg Kurzbabb8192020-12-18 11:33:57 +01001798 /*
1799 * Process all pending hot-plug/unplug requests now. An updated full
1800 * rendered FDT will be returned to the guest.
1801 */
1802 spapr_drc_reset_all(spapr);
1803 spapr_clear_pending_hotplug_events(spapr);
David Gibson0c21e072019-11-29 15:00:58 +11001804
Greg Kurz087820e2020-03-25 16:25:49 +01001805 /*
1806 * If spapr_machine_reset() did not set up a HPT but one is necessary
1807 * (because the guest isn't going to use radix) then set it up here.
1808 */
1809 if ((spapr->patb_entry & PATE1_GR) && !guest_radix) {
1810 /* legacy hash or new hash: */
1811 spapr_setup_hpt(spapr);
Cédric Le Goater13db0cd2019-01-02 06:57:42 +01001812 }
1813
Greg Kurz087820e2020-03-25 16:25:49 +01001814 fdt = spapr_build_fdt(spapr, false, fdt_bufsize);
1815
1816 g_free(spapr->fdt_blob);
1817 spapr->fdt_size = fdt_totalsize(fdt);
1818 spapr->fdt_initial_size = spapr->fdt_size;
1819 spapr->fdt_blob = fdt;
Alexey Kardashevskiy2a6593c2014-05-23 12:26:54 +10001820
1821 return H_SUCCESS;
1822}
1823
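/*
 * KVMPPC_H_CAS entry point: checks that the buffer supplied by the
 * firmware is large enough, runs the negotiation above and, on
 * success, writes the update header followed by the packed FDT back
 * into guest memory.
 */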
Alexey Kardashevskiy91067db2020-03-25 16:25:42 +01001824static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
1825 SpaprMachineState *spapr,
1826 target_ulong opcode,
1827 target_ulong *args)
1828{
1829 target_ulong vec = ppc64_phys_to_real(args[0]);
1830 target_ulong fdt_buf = args[1];
1831 target_ulong fdt_bufsize = args[2];
1832 target_ulong ret;
1833 SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };
1834
1835 if (fdt_bufsize < sizeof(hdr)) {
1836 error_report("SLOF provided insufficient CAS buffer "
1837 TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr));
1838 exit(EXIT_FAILURE);
1839 }
1840
1841 fdt_bufsize -= sizeof(hdr);
1842
1843 ret = do_client_architecture_support(cpu, spapr, vec, fdt_bufsize);
1844 if (ret == H_SUCCESS) {
1845 _FDT((fdt_pack(spapr->fdt_blob)));
1846 spapr->fdt_size = fdt_totalsize(spapr->fdt_blob);
1847 spapr->fdt_initial_size = spapr->fdt_size;
1848
1849 cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr));
1850 cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob,
1851 spapr->fdt_size);
1852 trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr));
1853 }
1854
1855 return ret;
1856}
1857
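/*
 * H_GET_CPU_CHARACTERISTICS: translates the machine's speculative
 * execution mitigation capabilities (SPAPR_CAP_CFPC/SBBC/IBS and the
 * count cache flush assist) into the characteristics and behaviour
 * bitmasks returned in args[0] and args[1].
 */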
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001858static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
David Gibsonce2918c2019-03-06 15:35:37 +11001859 SpaprMachineState *spapr,
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001860 target_ulong opcode,
1861 target_ulong *args)
1862{
1863 uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS &
1864 ~H_CPU_CHAR_THR_RECONF_TRIG;
1865 uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY;
1866 uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC);
1867 uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC);
1868 uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS);
Suraj Jitindar Singh8ff43ee2019-03-01 14:19:12 +11001869 uint8_t count_cache_flush_assist = spapr_get_cap(spapr,
1870 SPAPR_CAP_CCF_ASSIST);
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001871
1872 switch (safe_cache) {
1873 case SPAPR_CAP_WORKAROUND:
1874 characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30;
1875 characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2;
1876 characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV;
1877 behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
1878 break;
1879 case SPAPR_CAP_FIXED:
1880 break;
1881 default: /* broken */
1882 assert(safe_cache == SPAPR_CAP_BROKEN);
1883 behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
1884 break;
1885 }
1886
1887 switch (safe_bounds_check) {
1888 case SPAPR_CAP_WORKAROUND:
1889 characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31;
1890 behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1891 break;
1892 case SPAPR_CAP_FIXED:
1893 break;
1894 default: /* broken */
1895 assert(safe_bounds_check == SPAPR_CAP_BROKEN);
1896 behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
1897 break;
1898 }
1899
1900 switch (safe_indirect_branch) {
Suraj Jitindar Singh399b2892019-03-01 14:19:11 +11001901 case SPAPR_CAP_FIXED_NA:
1902 break;
Suraj Jitindar Singhc76c0d32018-03-01 17:38:02 +11001903 case SPAPR_CAP_FIXED_CCD:
1904 characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS;
1905 break;
1906 case SPAPR_CAP_FIXED_IBS:
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001907 characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED;
Greg Kurzfa86f592018-02-01 20:47:41 +01001908 break;
Suraj Jitindar Singh399b2892019-03-01 14:19:11 +11001909 case SPAPR_CAP_WORKAROUND:
1910 behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE;
Suraj Jitindar Singh8ff43ee2019-03-01 14:19:12 +11001911 if (count_cache_flush_assist) {
1912 characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST;
1913 }
Suraj Jitindar Singh399b2892019-03-01 14:19:11 +11001914 break;
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001915 default: /* broken */
1916 assert(safe_indirect_branch == SPAPR_CAP_BROKEN);
1917 break;
1918 }
1919
1920 args[0] = characteristics;
1921 args[1] = behaviour;
Alexey Kardashevskiyfea35ca2018-12-21 01:34:48 +01001922 return H_SUCCESS;
1923}
1924
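/*
 * KVMPPC_H_UPDATE_DT: allows the guest firmware to hand back an
 * updated device tree blob.  The update is silently ignored when the
 * machine type doesn't enable it, and rejected with H_PARAMETER if
 * the blob has grown to more than twice the initial FDT size or
 * fails the libfdt consistency check.
 */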
David Gibsonce2918c2019-03-06 15:35:37 +11001925static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
Alexey Kardashevskiyfea35ca2018-12-21 01:34:48 +01001926 target_ulong opcode, target_ulong *args)
1927{
1928 target_ulong dt = ppc64_phys_to_real(args[0]);
1929 struct fdt_header hdr = { 0 };
1930 unsigned cb;
David Gibsonce2918c2019-03-06 15:35:37 +11001931 SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
Alexey Kardashevskiyfea35ca2018-12-21 01:34:48 +01001932 void *fdt;
1933
1934 cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
1935 cb = fdt32_to_cpu(hdr.totalsize);
1936
1937 if (!smc->update_dt_enabled) {
1938 return H_SUCCESS;
1939 }
1940
1941 /* Check that the fdt did not grow out of proportion */
1942 if (cb > spapr->fdt_initial_size * 2) {
1943 trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
1944 fdt32_to_cpu(hdr.magic));
1945 return H_PARAMETER;
1946 }
1947
1948 fdt = g_malloc0(cb);
1949 cpu_physical_memory_read(dt, fdt, cb);
1950
1951 /* Check the fdt consistency */
1952 if (fdt_check_full(fdt, cb)) {
1953 trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
1954 fdt32_to_cpu(hdr.magic));
1955 return H_PARAMETER;
1956 }
1957
1958 g_free(spapr->fdt_blob);
1959 spapr->fdt_size = cb;
1960 spapr->fdt_blob = fdt;
1961 trace_spapr_update_dt(cb);
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11001962
1963 return H_SUCCESS;
1964}
1965
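/*
 * Hypercall dispatch tables: one for the architected PAPR hcalls, one
 * for the qemu/KVM-PPC private range and one for the secure-VM (SVM)
 * range.
 */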
David Gibson7d7ba3f2011-05-10 16:06:21 +10001966static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
1967static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
Michael Roth0fb6bd02019-07-17 15:58:42 -05001968static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];
David Gibson9fdf0c22011-04-01 15:15:20 +11001969
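/*
 * Installs a handler in whichever table covers the opcode.  A minimal
 * sketch of how a handler is wired up (the H_EXAMPLE opcode and the
 * handler name are hypothetical, for illustration only):
 *
 *     static target_ulong h_example(PowerPCCPU *cpu, SpaprMachineState *spapr,
 *                                   target_ulong opcode, target_ulong *args)
 *     {
 *         return H_SUCCESS;
 *     }
 *
 *     spapr_register_hypercall(H_EXAMPLE, h_example);
 */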
1970void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
1971{
David Gibson39ac8452011-04-01 15:15:23 +11001972 spapr_hcall_fn *slot;
David Gibson9fdf0c22011-04-01 15:15:20 +11001973
David Gibson39ac8452011-04-01 15:15:23 +11001974 if (opcode <= MAX_HCALL_OPCODE) {
1975 assert((opcode & 0x3) == 0);
David Gibson9fdf0c22011-04-01 15:15:20 +11001976
David Gibson39ac8452011-04-01 15:15:23 +11001977 slot = &papr_hypercall_table[opcode / 4];
Michael Roth0fb6bd02019-07-17 15:58:42 -05001978 } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) {
1979 /* we only have SVM-related hcall numbers assigned in multiples of 4 */
1980 assert((opcode & 0x3) == 0);
1981
1982 slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
David Gibson39ac8452011-04-01 15:15:23 +11001983 } else {
1984 assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
David Gibson9fdf0c22011-04-01 15:15:20 +11001985
David Gibson39ac8452011-04-01 15:15:23 +11001986 slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
1987 }
1988
David Gibsonc89d5292012-10-08 18:17:36 +00001989 assert(!(*slot));
David Gibson39ac8452011-04-01 15:15:23 +11001990 *slot = fn;
David Gibson9fdf0c22011-04-01 15:15:20 +11001991}
1992
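/*
 * Main hcall dispatch: looks the opcode up in the PAPR, SVM or KVM-PPC
 * table and invokes the registered handler, logging and returning
 * H_FUNCTION if nothing is registered for it.
 */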
Andreas Färberaa100fa2012-05-03 06:13:14 +02001993target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
David Gibson9fdf0c22011-04-01 15:15:20 +11001994 target_ulong *args)
1995{
David Gibsonce2918c2019-03-06 15:35:37 +11001996 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
David Gibson28e02042015-07-02 16:23:04 +10001997
David Gibson9fdf0c22011-04-01 15:15:20 +11001998 if ((opcode <= MAX_HCALL_OPCODE)
1999 && ((opcode & 0x3) == 0)) {
David Gibson39ac8452011-04-01 15:15:23 +11002000 spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
2001
2002 if (fn) {
Andreas Färberb13ce262012-05-03 06:23:01 +02002003 return fn(cpu, spapr, opcode, args);
David Gibson39ac8452011-04-01 15:15:23 +11002004 }
Michael Roth0fb6bd02019-07-17 15:58:42 -05002005 } else if ((opcode >= SVM_HCALL_BASE) &&
2006 (opcode <= SVM_HCALL_MAX)) {
2007 spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
2008
2009 if (fn) {
2010 return fn(cpu, spapr, opcode, args);
2011 }
David Gibson39ac8452011-04-01 15:15:23 +11002012 } else if ((opcode >= KVMPPC_HCALL_BASE) &&
2013 (opcode <= KVMPPC_HCALL_MAX)) {
2014 spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
David Gibson9fdf0c22011-04-01 15:15:20 +11002015
2016 if (fn) {
Andreas Färberb13ce262012-05-03 06:23:01 +02002017 return fn(cpu, spapr, opcode, args);
David Gibson9fdf0c22011-04-01 15:15:20 +11002018 }
2019 }
2020
Thomas Huthaaf87c62015-09-01 11:29:02 +10002021 qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
2022 opcode);
David Gibson9fdf0c22011-04-01 15:15:20 +11002023 return H_FUNCTION;
2024}
David Gibsonf43e3522011-04-01 15:15:22 +11002025
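/*
 * Registers every h-call implemented in this file; run once at startup
 * via type_init().
 */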
Andreas Färber83f7d432012-02-09 15:20:55 +01002026static void hypercall_register_types(void)
David Gibsonf43e3522011-04-01 15:15:22 +11002027{
2028 /* hcall-pft */
2029 spapr_register_hypercall(H_ENTER, h_enter);
2030 spapr_register_hypercall(H_REMOVE, h_remove);
2031 spapr_register_hypercall(H_PROTECT, h_protect);
Erlon Cruz6bbd5dd2013-02-18 05:00:32 +00002032 spapr_register_hypercall(H_READ, h_read);
David Gibson39ac8452011-04-01 15:15:23 +11002033
David Gibsona3d0aba2011-08-31 15:50:50 +00002034 /* hcall-bulk */
2035 spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
2036
David Gibson30f4b052017-05-12 15:46:11 +10002037 /* hcall-hpt-resize */
2038 spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);
2039 spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit);
2040
David Gibsoned120052011-04-01 15:15:33 +11002041 /* hcall-splpar */
2042 spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
2043 spapr_register_hypercall(H_CEDE, h_cede);
Nicholas Piggine8ce0e42019-07-18 13:42:13 +10002044 spapr_register_hypercall(H_CONFER, h_confer);
Nicholas Piggin3a6e6222019-07-18 13:42:12 +10002045 spapr_register_hypercall(H_PROD, h_prod);
2046
Nicholas Piggin10741312019-07-18 13:42:14 +10002047 /* hcall-join */
2048 spapr_register_hypercall(H_JOIN, h_join);
2049
Nicholas Piggin1c7ad772016-12-05 16:50:21 +11002050 spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);
David Gibsoned120052011-04-01 15:15:33 +11002051
Thomas Huth423576f2016-02-11 13:47:18 +01002052 /* processor register resource access h-calls */
2053 spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
Thomas Huthaf08a582016-02-11 13:47:19 +01002054 spapr_register_hypercall(H_SET_DABR, h_set_dabr);
Thomas Huthe49ff262016-02-11 13:47:20 +01002055 spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
Thomas Huth3240dd92016-02-18 10:15:54 +01002056 spapr_register_hypercall(H_PAGE_INIT, h_page_init);
Thomas Huth423576f2016-02-11 13:47:18 +01002057 spapr_register_hypercall(H_SET_MODE, h_set_mode);
2058
Suraj Jitindar Singhd77a98b2017-03-20 10:46:45 +11002059 /* In Memory Table MMU h-calls */
2060 spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
2061 spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
2062 spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);
2063
Suraj Jitindar Singhc59704b2018-01-19 16:00:05 +11002064 /* hcall-get-cpu-characteristics */
2065 spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
2066 h_get_cpu_characteristics);
2067
David Gibson827200a2011-08-10 14:44:20 +00002068 /* "debugger" hcalls (also used by SLOF). Note: we do -not- differentiate
 2069 * here between the "CI" and the "CACHE" variants; they will use whatever
 2070 * mapping attributes qemu is using. When using KVM, the kernel will
 2071 * enforce the attributes more strongly.
 2072 */
2073 spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
2074 spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
2075 spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
2076 spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
2077 spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
2078 spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
Benjamin Herrenschmidtc73e3772012-06-18 20:21:37 +00002079 spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
David Gibson827200a2011-08-10 14:44:20 +00002080
David Gibson39ac8452011-04-01 15:15:23 +11002081 /* qemu/KVM-PPC specific hcalls */
2082 spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
Anton Blanchard42561bf2013-08-19 21:04:20 +10002083
Alexey Kardashevskiy2a6593c2014-05-23 12:26:54 +10002084 /* ibm,client-architecture-support support */
2085 spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
Laurent Vivierc24ba3d2018-12-19 17:35:41 +01002086
Alexey Kardashevskiyfea35ca2018-12-21 01:34:48 +01002087 spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
David Gibsonf43e3522011-04-01 15:15:22 +11002088}
Andreas Färber83f7d432012-02-09 15:20:55 +01002089
2090type_init(hypercall_register_types)