#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr_ovec.h"
#include "mmu-book3s-v3.h"
#include "hw/mem/memory-device.h"

static bool has_spr(PowerPCCPU *cpu, int spr)
{
    /* We can test whether the SPR is defined by checking for a valid name */
    return cpu->env.spr_cb[spr].name != NULL;
}

static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex)
{
    /*
     * hash value/pteg group index is normalized by HPT mask
     */
    if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) {
        return false;
    }
    return true;
}

static bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
{
    MachineState *machine = MACHINE(spapr);
    DeviceMemoryState *dms = machine->device_memory;

    if (addr < machine->ram_size) {
        return true;
    }
    if ((addr >= dms->base)
        && ((addr - dms->base) < memory_region_size(&dms->mr))) {
        return true;
    }

    return false;
}

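/*
 * H_ENTER: insert an entry into the hashed page table.  The ptex
 * argument is a PTE index: the bits above the low three select the
 * PTE group, the low three bits a slot within that group.  Without
 * H_EXACT the slot bits are ignored and the first free slot in the
 * group is used; with H_EXACT the requested slot itself must be free.
 */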
static target_ulong h_enter(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift;
    target_ulong raddr;
    target_ulong slot;
    const ppc_hash_pte64_t *hptes;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        target_ulong wimg_flags;
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M));

        if (wimg_flags != HPTE64_R_I &&
            wimg_flags != (HPTE64_R_I | HPTE64_R_M)) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    slot = ptex & 7ULL;
    ptex = ptex & ~7ULL;

    if (likely((flags & H_EXACT) == 0)) {
        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
        for (slot = 0; slot < 8; slot++) {
            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
        if (slot == 8) {
            return H_PTEG_FULL;
        }
    } else {
        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
            return H_PTEG_FULL;
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
    }

    spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = ptex + slot;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

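/*
 * Worker shared by H_REMOVE and H_BULK_REMOVE: validates the PTE
 * index, checks the AVPN/ANDCOND match conditions, then invalidates
 * the entry and flushes any cached translations for it.  The old PTE
 * doublewords are returned through *vp and *rp.
 */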
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return REMOVE_PARM;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, ptex, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        check_tlb_flush(env, true);
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

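/*
 * H_BULK_REMOVE takes up to four "translation specifiers", each a pair
 * of doublewords in args[]: the first doubleword carries type and flag
 * bits in its high byte and the PTE index in the low bits, the second
 * the AVPN or ANDCOND value.  A per-entry return code is written back
 * into the high bits of the first doubleword.
 */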
#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4

static target_ulong h_bulk_remove(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong rc = H_SUCCESS;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i * 2];
        target_ulong tsl = args[i * 2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            rc = H_PARAMETER;
            goto exit;

        case REMOVE_HW:
            rc = H_HARDWARE;
            goto exit;
        }
    }
 exit:
    check_tlb_flush(env, true);

    return rc;
}

static target_ulong h_protect(PowerPCCPU *cpu, SpaprMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    target_ulong avpn = args[2];
    const ppc_hash_pte64_t *hptes;
    target_ulong v, r;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    spapr_store_hpte(cpu, ptex,
                     (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    /* Flush the tlb */
    check_tlb_flush(env, true);
    /* Don't need a memory barrier, due to qemu's global lock */
    spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

static target_ulong h_read(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong ptex = args[1];
    int i, ridx, n_entries = 1;
    const ppc_hash_pte64_t *hptes;

    if (!valid_ptex(cpu, ptex)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        ptex &= ~(3ULL);
        n_entries = 4;
    }

    hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries);
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i);
        args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i);
    }
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries);

    return H_SUCCESS;
}

struct SpaprPendingHpt {
    /* These fields are read-only after initialization */
    int shift;
    QemuThread thread;

    /* These fields are protected by the BQL */
    bool complete;

    /* These fields are private to the preparation thread if
     * !complete, otherwise protected by the BQL */
    int ret;
    void *hpt;
};

static void free_pending_hpt(SpaprPendingHpt *pending)
{
    if (pending->hpt) {
        qemu_vfree(pending->hpt);
    }

    g_free(pending);
}

static void *hpt_prepare_thread(void *opaque)
{
    SpaprPendingHpt *pending = opaque;
    size_t size = 1ULL << pending->shift;

    pending->hpt = qemu_memalign(size, size);
    if (pending->hpt) {
        memset(pending->hpt, 0, size);
        pending->ret = H_SUCCESS;
    } else {
        pending->ret = H_NO_MEM;
    }

    qemu_mutex_lock_iothread();

    if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) {
        /* Ready to go */
        pending->complete = true;
    } else {
        /* We've been cancelled, clean ourselves up */
        free_pending_hpt(pending);
    }

    qemu_mutex_unlock_iothread();
    return NULL;
}

/* Must be called with BQL held */
static void cancel_hpt_prepare(SpaprMachineState *spapr)
{
    SpaprPendingHpt *pending = spapr->pending_hpt;

    /* Let the thread know it's cancelled */
    spapr->pending_hpt = NULL;

    if (!pending) {
        /* Nothing to do */
        return;
    }

    if (!pending->complete) {
        /* thread will clean itself up */
        return;
    }

    free_pending_hpt(pending);
}

/* Convert a return code from the KVM ioctl()s implementing resize HPT
 * into a PAPR hypercall return code */
static target_ulong resize_hpt_convert_rc(int ret)
{
    if (ret >= 100000) {
        return H_LONG_BUSY_ORDER_100_SEC;
    } else if (ret >= 10000) {
        return H_LONG_BUSY_ORDER_10_SEC;
    } else if (ret >= 1000) {
        return H_LONG_BUSY_ORDER_1_SEC;
    } else if (ret >= 100) {
        return H_LONG_BUSY_ORDER_100_MSEC;
    } else if (ret >= 10) {
        return H_LONG_BUSY_ORDER_10_MSEC;
    } else if (ret > 0) {
        return H_LONG_BUSY_ORDER_1_MSEC;
    }

    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EPERM:
        return H_AUTHORITY;
    case -EINVAL:
        return H_PARAMETER;
    case -ENXIO:
        return H_CLOSED;
    case -ENOSPC:
        return H_PTEG_FULL;
    case -EBUSY:
        return H_BUSY;
    case -ENOMEM:
        return H_NO_MEM;
    default:
        return H_HARDWARE;
    }
}

static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    target_ulong flags = args[0];
    int shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    uint64_t current_ram_size;
    int rc;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_prepare(flags, shift);

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (shift && ((shift < 18) || (shift > 46))) {
        return H_PARAMETER;
    }

    current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();

    /* We only allow the guest to allocate an HPT one order above what
     * we'd normally give them (to stop a small guest claiming a huge
     * chunk of resources in the HPT) */
    if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) {
        return H_RESOURCE;
    }

    rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
    if (rc != -ENOSYS) {
        return resize_hpt_convert_rc(rc);
    }

    if (pending) {
        /* something already in progress */
        if (pending->shift == shift) {
            /* and it's suitable */
            if (pending->complete) {
                return pending->ret;
            } else {
                return H_LONG_BUSY_ORDER_100_MSEC;
            }
        }

        /* not suitable, cancel and replace */
        cancel_hpt_prepare(spapr);
    }

    if (!shift) {
        /* nothing to do */
        return H_SUCCESS;
    }

    /* start new prepare */

    pending = g_new0(SpaprPendingHpt, 1);
    pending->shift = shift;
    pending->ret = H_HARDWARE;

    qemu_thread_create(&pending->thread, "sPAPR HPT prepare",
                       hpt_prepare_thread, pending, QEMU_THREAD_DETACHED);

    spapr->pending_hpt = pending;

    /* In theory we could estimate the time more accurately based on
     * the new size, but there's not much point */
    return H_LONG_BUSY_ORDER_100_MSEC;
}

static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;
    return ldq_p(addr);
}

static void new_hpte_store(void *htab, uint64_t pteg, int slot,
                           uint64_t pte0, uint64_t pte1)
{
    uint8_t *addr = htab;

    addr += pteg * HASH_PTEG_SIZE_64;
    addr += slot * HASH_PTE_SIZE_64;

    stq_p(addr, pte0);
    stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1);
}

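/*
 * Move one HPTE from the old table to the new one.  The primary hash
 * is reconstructed from the AVPN field plus, for base page sizes
 * below 23 bits, low-order hash bits recovered from the entry's old
 * PTE group index; the new group is then selected with the new
 * table's hash mask.  Only bolted entries are preserved - anything
 * else is dropped and simply faults back in on the guest side.
 */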
static int rehash_hpte(PowerPCCPU *cpu,
                       const ppc_hash_pte64_t *hptes,
                       void *old_hpt, uint64_t oldsize,
                       void *new_hpt, uint64_t newsize,
                       uint64_t pteg, int slot)
{
    uint64_t old_hash_mask = (oldsize >> 7) - 1;
    uint64_t new_hash_mask = (newsize >> 7) - 1;
    target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot);
    target_ulong pte1;
    uint64_t avpn;
    unsigned base_pg_shift;
    uint64_t hash, new_pteg, replace_pte0;

    if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) {
        return H_SUCCESS;
    }

    pte1 = ppc_hash64_hpte1(cpu, hptes, slot);

    base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1);
    assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */
    avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23);

    if (pte0 & HPTE64_V_SECONDARY) {
        pteg = ~pteg;
    }

    if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) {
        uint64_t offset, vsid;

        /* We only have 28 - 23 bits of offset in avpn */
        offset = (avpn & 0x1f) << 23;
        vsid = avpn >> 5;
        /* We can find more bits from the pteg value */
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift;
        }

        hash = vsid ^ (offset >> base_pg_shift);
    } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) {
        uint64_t offset, vsid;

        /* We only have 40 - 23 bits of seg_off in avpn */
        offset = (avpn & 0x1ffff) << 23;
        vsid = avpn >> 17;
        if (base_pg_shift < 23) {
            offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask)
                << base_pg_shift;
        }

        hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift);
    } else {
        error_report("rehash_pte: Bad segment size in HPTE");
        return H_HARDWARE;
    }

    new_pteg = hash & new_hash_mask;
    if (pte0 & HPTE64_V_SECONDARY) {
        assert(~pteg == (hash & old_hash_mask));
        new_pteg = ~new_pteg;
    } else {
        assert(pteg == (hash & old_hash_mask));
    }
    assert((oldsize != newsize) || (pteg == new_pteg));
    replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot);
    /*
     * Strictly speaking, we don't need all these tests, since we only
     * ever rehash bolted HPTEs.  We might in future handle non-bolted
     * HPTEs, though, so make the logic correct for those cases as
     * well.
     */
    if (replace_pte0 & HPTE64_V_VALID) {
        assert(newsize < oldsize);
        if (replace_pte0 & HPTE64_V_BOLTED) {
            if (pte0 & HPTE64_V_BOLTED) {
                /* Bolted collision, nothing we can do */
                return H_PTEG_FULL;
            } else {
                /* Discard this hpte */
                return H_SUCCESS;
            }
        }
    }

    new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1);
    return H_SUCCESS;
}

static int rehash_hpt(PowerPCCPU *cpu,
                      void *old_hpt, uint64_t oldsize,
                      void *new_hpt, uint64_t newsize)
{
    uint64_t n_ptegs = oldsize >> 7;
    uint64_t pteg;
    int slot;
    int rc;

    for (pteg = 0; pteg < n_ptegs; pteg++) {
        hwaddr ptex = pteg * HPTES_PER_GROUP;
        const ppc_hash_pte64_t *hptes
            = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);

        if (!hptes) {
            return H_HARDWARE;
        }

        for (slot = 0; slot < HPTES_PER_GROUP; slot++) {
            rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize,
                             pteg, slot);
            if (rc != H_SUCCESS) {
                ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
                return rc;
            }
        }
        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
    }

    return H_SUCCESS;
}

static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data)
{
    int ret;

    cpu_synchronize_state(cs);

    ret = kvmppc_put_books_sregs(POWERPC_CPU(cs));
    if (ret < 0) {
        error_report("failed to push sregs to KVM: %s", strerror(-ret));
        exit(1);
    }
}

static void push_sregs_to_kvm_pr(SpaprMachineState *spapr)
{
    CPUState *cs;

    /*
     * This is a hack for the benefit of KVM PR - it abuses the SDR1
     * slot in kvm_sregs to communicate the userspace address of the
     * HPT
     */
    if (!kvm_enabled() || !spapr->htab) {
        return;
    }

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL);
    }
}

static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong shift = args[1];
    SpaprPendingHpt *pending = spapr->pending_hpt;
    int rc;
    size_t newsize;

    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) {
        return H_AUTHORITY;
    }

    if (!spapr->htab_shift) {
        /* Radix guest, no HPT */
        return H_NOT_AVAILABLE;
    }

    trace_spapr_h_resize_hpt_commit(flags, shift);

    rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
    if (rc != -ENOSYS) {
        rc = resize_hpt_convert_rc(rc);
        if (rc == H_SUCCESS) {
            /* Need to set the new htab_shift in the machine state */
            spapr->htab_shift = shift;
        }
        return rc;
    }

    if (flags != 0) {
        return H_PARAMETER;
    }

    if (!pending || (pending->shift != shift)) {
        /* no matching prepare */
        return H_CLOSED;
    }

    if (!pending->complete) {
        /* prepare has not completed */
        return H_BUSY;
    }

    /* Shouldn't have got past PREPARE without an HPT */
    g_assert(spapr->htab_shift);

    newsize = 1ULL << pending->shift;
    rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr),
                    pending->hpt, newsize);
    if (rc == H_SUCCESS) {
        qemu_vfree(spapr->htab);
        spapr->htab = pending->hpt;
        spapr->htab_shift = pending->shift;

        push_sregs_to_kvm_pr(spapr);

        pending->hpt = NULL; /* so it's not free()d */
    }

    /* Clean up */
    spapr->pending_hpt = NULL;
    free_pending_hpt(pending);

    return rc;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

static target_ulong h_set_dabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, true);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, false);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len); /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

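/*
 * The VPA (Virtual Processor Area) is a per-vCPU block of memory
 * shared between the guest OS and the hypervisor (dispatch counts,
 * shared-processor indication and so on).  The SLB shadow buffer and
 * dispatch trace log (DTL) can only be registered while a VPA is
 * registered, and neither the VPA nor the SLB shadow may cross a 4kiB
 * page boundary.
 */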
static target_ulong register_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    spapr_cpu->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(PowerPCCPU *cpu, target_ulong vpa)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (spapr_cpu->dtl_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->slb_shadow_addr = addr;
    spapr_cpu->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->slb_shadow_addr = 0;
    spapr_cpu->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(CPU(cpu)->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!spapr_cpu->vpa_addr) {
        return H_RESOURCE;
    }

    spapr_cpu->dtl_addr = addr;
    spapr_cpu->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(PowerPCCPU *cpu, target_ulong addr)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    spapr_cpu->dtl_addr = 0;
    spapr_cpu->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    PowerPCCPU *tcpu;

    tcpu = spapr_find_cpu(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tcpu, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tcpu, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tcpu, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tcpu, vpa);
        break;
    }

    return ret;
}

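/*
 * H_CEDE: the vCPU yields the processor until it has work to do, is
 * prodded with H_PROD, or takes an interrupt (MSR[EE] is set here so
 * that a pending external interrupt can wake it immediately).
 */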
static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }

    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }

    return H_SUCCESS;
}

/*
 * Confer to self, aka join.  Cede could use the same pattern as well, if
 * EXCP_HLT can be changed to EXCP_HALTED.
 */
static target_ulong h_confer_self(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->prod) {
        spapr_cpu->prod = false;
        return H_SUCCESS;
    }
    cs->halted = 1;
    cs->exception_index = EXCP_HALTED;
    cs->exit_request = 1;

    return H_SUCCESS;
}

static target_ulong h_join(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs;
    bool last_unjoined = true;

    if (env->msr & (1ULL << MSR_EE)) {
        return H_BAD_MODE;
    }

    /*
     * Must not join the last CPU running. Interestingly, no such restriction
     * for H_CONFER-to-self, but that is probably not intended to be used
     * when H_JOIN is available.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *c = POWERPC_CPU(cs);
        CPUPPCState *e = &c->env;
        if (c == cpu) {
            continue;
        }

        /* Don't have a way to indicate joined, so use halted && MSR[EE]=0 */
        if (!cs->halted || (e->msr & (1ULL << MSR_EE))) {
            last_unjoined = false;
            break;
        }
    }
    if (last_unjoined) {
        return H_CONTINUE;
    }

    return h_confer_self(cpu);
}

static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    uint32_t dispatch = args[1];
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu;

    /*
     * -1 means confer to all other CPUs without dispatch counter check,
     * otherwise it's a targeted confer.
     */
    if (target != -1) {
        PowerPCCPU *target_cpu = spapr_find_cpu(target);
        uint32_t target_dispatch;

        if (!target_cpu) {
            return H_PARAMETER;
        }

        /*
         * target == self is a special case, we wait until prodded, without
         * dispatch counter check.
         */
        if (cpu == target_cpu) {
            return h_confer_self(cpu);
        }

        spapr_cpu = spapr_cpu_state(target_cpu);
        if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) {
            return H_SUCCESS;
        }

        target_dispatch = ldl_be_phys(cs->as,
                                      spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        if (target_dispatch != dispatch) {
            return H_SUCCESS;
        }

        /*
         * The targeted confer does not do anything special beyond yielding
         * the current vCPU, but even this should be better than nothing.
         * At least for single-threaded tcg, it gives the target a chance to
         * run before we run again. Multi-threaded tcg does not really do
         * anything with EXCP_YIELD yet.
         */
    }

    cs->exception_index = EXCP_YIELD;
    cs->exit_request = 1;
    cpu_loop_exit(cs);

    return H_SUCCESS;
}

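/*
 * H_PROD: wake a ceded or conferred target vCPU.  If the target is
 * not currently sleeping, the prod is latched and consumes the
 * target's next H_CEDE or H_CONFER-to-self instead.
 */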
static target_ulong h_prod(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    PowerPCCPU *tcpu;
    CPUState *cs;
    SpaprCpuState *spapr_cpu;

    tcpu = spapr_find_cpu(target);
    cs = CPU(tcpu);
    if (!cs) {
        return H_PARAMETER;
    }

    spapr_cpu = spapr_cpu_state(tcpu);
    spapr_cpu->prod = true;
    cs->halted = 0;
    qemu_cpu_kick(cs);

    return H_SUCCESS;
}

static target_ulong h_rtas(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4 * nargs);
}

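/*
 * PAPR logical cache-inhibited load/store, used by the guest for
 * access to emulated MMIO (e.g. PCI) space from real mode; args[0] is
 * the access width in bytes, args[1] the logical address.
 */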
static target_ulong h_logical_load(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

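/*
 * H_LOGICAL_MEMOP: bulk copy (op 0) or copy-with-invert (op 1)
 * between logical addresses with an element size of 1, 2, 4 or 8
 * bytes (esize is the log2).  Overlapping ranges are handled by
 * copying backwards when the destination lands inside the source.
 */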
static target_ulong h_logical_memop(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

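/*
 * Helpers for H_SET_MODE.  RESOURCE_LE flips the interrupt
 * little-endian mode (LPCR[ILE]) on all CPUs; RESOURCE_ADDR_TRANS_MODE
 * sets the alternate interrupt location (LPCR[AIL]), which is only
 * available on ISA 2.07S CPUs.
 */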
Peter Maydell7d0cd462014-07-08 16:02:26 +01001353static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
1354 target_ulong mflags,
1355 target_ulong value1,
1356 target_ulong value2)
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001357{
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001358 if (value1) {
1359 return H_P3;
1360 }
1361 if (value2) {
1362 return H_P4;
1363 }
1364
1365 switch (mflags) {
1366 case H_SET_MODE_ENDIAN_BIG:
Benjamin Herrenschmidt00fd0752019-02-15 18:00:18 +01001367 spapr_set_all_lpcrs(0, LPCR_ILE);
David Gibsoneefaccc2015-02-10 15:36:16 +11001368 spapr_pci_switch_vga(true);
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001369 return H_SUCCESS;
1370
1371 case H_SET_MODE_ENDIAN_LITTLE:
Benjamin Herrenschmidt00fd0752019-02-15 18:00:18 +01001372 spapr_set_all_lpcrs(LPCR_ILE, LPCR_ILE);
David Gibsoneefaccc2015-02-10 15:36:16 +11001373 spapr_pci_switch_vga(false);
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001374 return H_SUCCESS;
1375 }
1376
1377 return H_UNSUPPORTED_FLAG;
1378}
1379
Peter Maydell7d0cd462014-07-08 16:02:26 +01001380static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
1381 target_ulong mflags,
1382 target_ulong value1,
1383 target_ulong value2)
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001384{
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001385 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001386
1387 if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
1388 return H_P2;
1389 }
1390 if (value1) {
1391 return H_P3;
1392 }
1393 if (value2) {
1394 return H_P4;
1395 }
1396
Cédric Le Goater5c94b2a2016-04-03 19:57:50 +02001397 if (mflags == AIL_RESERVED) {
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001398 return H_UNSUPPORTED_FLAG;
1399 }
1400
Benjamin Herrenschmidt00fd0752019-02-15 18:00:18 +01001401 spapr_set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001402
1403 return H_SUCCESS;
1404}
1405
David Gibsonce2918c2019-03-06 15:35:37 +11001406static target_ulong h_set_mode(PowerPCCPU *cpu, SpaprMachineState *spapr,
Anton Blanchard42561bf2013-08-19 21:04:20 +10001407 target_ulong opcode, target_ulong *args)
1408{
Anton Blanchard42561bf2013-08-19 21:04:20 +10001409 target_ulong resource = args[1];
Anton Blanchard42561bf2013-08-19 21:04:20 +10001410 target_ulong ret = H_P2;
1411
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001412 switch (resource) {
1413 case H_SET_MODE_RESOURCE_LE:
Peter Maydell7d0cd462014-07-08 16:02:26 +01001414 ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
Alexey Kardashevskiyc4015bb2014-06-04 22:51:04 +10001415 break;
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001416 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
Peter Maydell7d0cd462014-07-08 16:02:26 +01001417 ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
1418 args[2], args[3]);
Alexey Kardashevskiyd5ac4f52014-06-04 22:51:05 +10001419 break;
Anton Blanchard42561bf2013-08-19 21:04:20 +10001420 }
1421
Anton Blanchard42561bf2013-08-19 21:04:20 +10001422 return ret;
1423}
1424
static target_ulong h_clean_slb(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_CLEAN_SLB)");
    return H_FUNCTION;
}

static target_ulong h_invalidate_pid(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n",
                  opcode, " (H_INVALIDATE_PID)");
    return H_FUNCTION;
}

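/*
 * Informal note: returning H_FUNCTION signals "hypercall not
 * implemented", so a guest probing these in-memory-table MMU helpers
 * is expected to fall back to its ordinary invalidation paths. The
 * LOG_UNIMP tracing above makes such probes visible when QEMU runs
 * with "-d unimp".
 */
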
static void spapr_check_setup_free_hpt(SpaprMachineState *spapr,
                                       uint64_t patbe_old, uint64_t patbe_new)
{
    /*
     * We have 4 Options:
     * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing
     * HASH->RADIX                                  : Free HPT
     * RADIX->HASH                                  : Allocate HPT
     * NOTHING->HASH                                : Allocate HPT
     * Note: NOTHING implies the case where we said the guest could choose
     *       later and so assumed radix and now it's called H_REG_PROC_TBL
     */

    if ((patbe_old & PATE1_GR) == (patbe_new & PATE1_GR)) {
        /* We assume RADIX, so this catches all the "Do Nothing" cases */
    } else if (!(patbe_old & PATE1_GR)) {
        /* HASH->RADIX : Free HPT */
        spapr_free_hpt(spapr);
    } else if (!(patbe_new & PATE1_GR)) {
        /* RADIX->HASH || NOTHING->HASH : Allocate HPT */
        spapr_setup_hpt(spapr);
    }
}

#define FLAGS_MASK              0x01FULL
#define FLAG_MODIFY             0x10
#define FLAG_REGISTER           0x08
#define FLAG_RADIX              0x04
#define FLAG_HASH_PROC_TBL      0x02
#define FLAG_GTSE               0x01

static target_ulong h_register_process_table(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong proc_tbl = args[1];
    target_ulong page_size = args[2];
    target_ulong table_size = args[3];
    target_ulong update_lpcr = 0;
    uint64_t cproc;

    if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */
        return H_PARAMETER;
    }
    if (flags & FLAG_MODIFY) {
        if (flags & FLAG_REGISTER) {
            if (flags & FLAG_RADIX) { /* Register new RADIX process table */
                if (proc_tbl & 0xfff || proc_tbl >> 60) {
                    return H_P2;
                } else if (page_size) {
                    return H_P3;
                } else if (table_size > 24) {
                    return H_P4;
                }
                cproc = PATE1_GR | proc_tbl | table_size;
            } else { /* Register new HPT process table */
                if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */
                    /* TODO - Not Supported */
                    /* Technically caused by flag bits => H_PARAMETER */
                    return H_PARAMETER;
                } else { /* Hash with SLB */
                    if (proc_tbl >> 38) {
                        return H_P2;
                    } else if (page_size & ~0x7) {
                        return H_P3;
                    } else if (table_size > 24) {
                        return H_P4;
                    }
                }
                cproc = (proc_tbl << 25) | page_size << 5 | table_size;
            }

        } else { /* Deregister current process table */
            /*
             * Set to benign value: (current GR) | 0. This allows
             * deregistration in KVM to succeed even if the radix bit
             * in flags doesn't match the radix bit in the old PATE.
             */
            cproc = spapr->patb_entry & PATE1_GR;
        }
    } else { /* Maintain current registration */
        if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) {
            /* Technically caused by flag bits => H_PARAMETER */
            return H_PARAMETER; /* Existing Process Table Mismatch */
        }
        cproc = spapr->patb_entry;
    }

    /* Check if we need to set up OR free the HPT */
    spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc);

    spapr->patb_entry = cproc; /* Save new process table */

    /* Update the UPRT, HR and GTSE bits in the LPCR for all cpus */
    if (flags & FLAG_RADIX) { /* Radix must use process tables, also set HR */
        update_lpcr |= (LPCR_UPRT | LPCR_HR);
    } else if (flags & FLAG_HASH_PROC_TBL) { /* Hash with process tables */
        update_lpcr |= LPCR_UPRT;
    }
    if (flags & FLAG_GTSE) { /* Guest translation shootdown enable */
        update_lpcr |= LPCR_GTSE;
    }

    spapr_set_all_lpcrs(update_lpcr, LPCR_UPRT | LPCR_HR | LPCR_GTSE);

    if (kvm_enabled()) {
        return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
                                       flags & FLAG_GTSE, cproc);
    }
    return H_SUCCESS;
}

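/*
 * Informal sketch of the partition-table doubleword assembled in cproc
 * above, as understood from ISA 3.0 / LoPAPR (not part of the original
 * comments):
 *
 *   radix: PATE1_GR | process-table base | encoded table size
 *   hash:  (proc_tbl << 25) | (page_size << 5) | table_size
 *
 * PATE1_GR marks the partition as guest-radix, which is why every
 * radix-vs-hash decision in this file reduces to testing
 * patb_entry & PATE1_GR.
 */
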
#define H_SIGNAL_SYS_RESET_ALL        -1
#define H_SIGNAL_SYS_RESET_ALLBUTSELF -2

static target_ulong h_signal_sys_reset(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_long target = args[0];
    CPUState *cs;

    if (target < 0) {
        /* Broadcast */
        if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) {
            return H_PARAMETER;
        }

        CPU_FOREACH(cs) {
            PowerPCCPU *c = POWERPC_CPU(cs);

            if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) {
                if (c == cpu) {
                    continue;
                }
            }
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
        }
        return H_SUCCESS;

    } else {
        /* Unicast */
        cs = CPU(spapr_find_cpu(target));
        if (cs) {
            run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
            return H_SUCCESS;
        }
        return H_PARAMETER;
    }
}

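/*
 * Informal usage note (assumption based on typical guest behaviour,
 * not part of the original source): guests use H_SIGNAL_SYS_RESET
 * mainly from crash paths, e.g. target = H_SIGNAL_SYS_RESET_ALLBUTSELF
 * to pull every other vCPU into the system-reset (NMI-like) handler
 * before taking a kdump. run_on_cpu() queues the reset so it is raised
 * from each target vCPU's own thread.
 */
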
static uint32_t cas_check_pvr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong *addr, bool *raw_mode_supported,
                              Error **errp)
{
    bool explicit_match = false; /* Matched the CPU's real PVR */
    uint32_t max_compat = spapr->max_compat_pvr;
    uint32_t best_compat = 0;
    int i;

    /*
     * We scan the supplied table of PVRs looking for two things
     *   1. Is our real CPU PVR in the list?
     *   2. What's the "best" listed logical PVR
     */
    for (i = 0; i < 512; ++i) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, *addr);
        pvr = ldl_be_phys(&address_space_memory, *addr + 4);
        *addr += 8;

        if (~pvr_mask & pvr) {
            break; /* Terminator record */
        }

        if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) {
            explicit_match = true;
        } else {
            if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) {
                best_compat = pvr;
            }
        }
    }

    if ((best_compat == 0) && (!explicit_match || max_compat)) {
        /*
         * We couldn't find a suitable compatibility mode, and either
         * the guest doesn't support "raw" mode for this CPU, or raw
         * mode is disabled because a maximum compat mode is set.
         */
        error_setg(errp, "Couldn't negotiate a suitable PVR during CAS");
        return 0;
    }

    *raw_mode_supported = explicit_match;

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat);

    return best_compat;
}

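/*
 * Informal sketch of the table cas_check_pvr() walks (layout inferred
 * from the loads above, not from the original comments). The guest
 * passes an array of big-endian word pairs:
 *
 *     +0: pvr_mask    +4: pvr value      <- repeated, up to 512 entries
 *
 * A record whose value has bits set outside its own mask can never
 * match a real PVR, so such an entry (e.g. mask 0, value ~0) serves as
 * the terminator.
 */
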
static void spapr_handle_transient_dev_before_cas(SpaprMachineState *spapr)
{
    Object *drc_container;
    ObjectProperty *prop;
    ObjectPropertyIterator iter;

    drc_container = container_get(object_get_root(), "/dr-connector");
    object_property_iter_init(&iter, drc_container);
    while ((prop = object_property_iter_next(&iter))) {
        SpaprDrc *drc;

        if (!strstart(prop->type, "link<", NULL)) {
            continue;
        }
        drc = SPAPR_DR_CONNECTOR(object_property_get_link(drc_container,
                                                          prop->name, NULL));

        if (spapr_drc_transient(drc)) {
            spapr_drc_reset(drc);
        }
    }

    spapr_clear_pending_hotplug_events(spapr);
}

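/*
 * Informal note: "transient" DRCs are dynamic-reconfiguration
 * connectors caught mid-way through a hotplug or unplug transaction.
 * Since CAS happens before the guest has processed any hotplug events,
 * rolling those connectors back with spapr_drc_reset() and dropping
 * the queued events keeps the device tree rebuilt below consistent
 * with what the guest will actually enumerate.
 */
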
static target_ulong h_client_architecture_support(PowerPCCPU *cpu,
                                                  SpaprMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    /* Working address in data buffer */
    target_ulong addr = ppc64_phys_to_real(args[0]);
    target_ulong fdt_buf = args[1];
    target_ulong fdt_bufsize = args[2];
    target_ulong ov_table;
    uint32_t cas_pvr;
    SpaprOptionVector *ov1_guest, *ov5_guest, *ov5_cas_old;
    bool guest_radix;
    Error *local_err = NULL;
    bool raw_mode_supported = false;
    bool guest_xive;
    CPUState *cs;

    /* CAS is supposed to be called early when only the boot vCPU is active. */
    CPU_FOREACH(cs) {
        if (cs == CPU(cpu)) {
            continue;
        }
        if (!cs->halted) {
            warn_report("guest has multiple active vCPUs at CAS, which is not allowed");
            return H_MULTI_THREADS_ACTIVE;
        }
    }

    cas_pvr = cas_check_pvr(spapr, cpu, &addr, &raw_mode_supported, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return H_HARDWARE;
    }

    /* Update CPUs */
    if (cpu->compat_pvr != cas_pvr) {
        ppc_set_compat_all(cas_pvr, &local_err);
        if (local_err) {
            /*
             * We failed to set the compat mode (likely because we're
             * running with KVM PR), but maybe we can fall back to raw
             * mode if the guest supports it.
             */
            if (!raw_mode_supported) {
                error_report_err(local_err);
                return H_HARDWARE;
            }
            error_free(local_err);
            local_err = NULL;
        }
    }

    /* For future use: here @ov_table points to the first option vector */
    ov_table = addr;

    ov1_guest = spapr_ovec_parse_vector(ov_table, 1);
    if (!ov1_guest) {
        warn_report("guest didn't provide option vector 1");
        return H_PARAMETER;
    }
    ov5_guest = spapr_ovec_parse_vector(ov_table, 5);
    if (!ov5_guest) {
        warn_report("guest didn't provide option vector 5");
        return H_PARAMETER;
    }
    if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) {
        error_report("guest requested hash and radix MMU, which is invalid.");
        exit(EXIT_FAILURE);
    }
    if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) {
        error_report("guest requested an invalid interrupt mode");
        exit(EXIT_FAILURE);
    }

    /* The radix/hash bit in byte 24 requires special handling: */
    guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300);
    spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300);

    guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT);

    /*
     * HPT resizing is a bit of a special case, because when enabled
     * we assume an HPT guest will support it until it says it
     * doesn't, instead of assuming it won't support it until it says
     * it does. Strictly speaking that approach could break for
     * guests which don't make a CAS call, but those are so old we
     * don't care about them. Without that assumption we'd have to
     * make at least a temporary allocation of an HPT sized for max
     * memory, which could be impossibly difficult under KVM HV if
     * maxram is large.
     */
    if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) {
        int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);

        if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) {
            error_report(
                "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required");
            exit(1);
        }

        if (spapr->htab_shift < maxshift) {
            /*
             * Guest doesn't know about HPT resizing, so we
             * pre-emptively resize for the maximum permitted RAM. At
             * the point this is called, nothing should have been
             * entered into the existing HPT.
             */
            spapr_reallocate_hpt(spapr, maxshift, &error_fatal);
            push_sregs_to_kvm_pr(spapr);
        }
    }

    /*
     * NOTE: there are actually a number of ov5 bits where input from the
     * guest is always zero, and the platform/QEMU enables them independently
     * of guest input. To model these properly we'd want some sort of mask,
     * but since they only currently apply to memory migration as defined
     * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need
     * to worry about this for now.
     */
    ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas);

    /*
     * Also clear the radix/hash bit from the current ov5_cas bits to
     * be in sync with the newly negotiated ov5 bits. Else the radix bit
     * will be seen as being removed and this will generate a reset loop.
     */
    spapr_ovec_clear(ov5_cas_old, OV5_MMU_RADIX_300);

    /* full range of negotiated ov5 capabilities */
    spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest);
    spapr_ovec_cleanup(ov5_guest);
    /*
     * capabilities that have been added since the CAS-generated guest
     * reset. If capabilities have since been removed, generate another
     * reset.
     */
    spapr->cas_reboot = !spapr_ovec_subset(ov5_cas_old, spapr->ov5_cas);
    spapr_ovec_cleanup(ov5_cas_old);
    /*
     * Now that processing is finished, set the radix/hash bit for the
     * guest if it requested a valid mode; otherwise terminate the boot.
     */
    if (guest_radix) {
        if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
            error_report("Guest requested unavailable MMU mode (radix).");
            exit(EXIT_FAILURE);
        }
        spapr_ovec_set(spapr->ov5_cas, OV5_MMU_RADIX_300);
    } else {
        if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
            && !kvmppc_has_cap_mmu_hash_v3()) {
            error_report("Guest requested unavailable MMU mode (hash).");
            exit(EXIT_FAILURE);
        }
    }
    spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00);
    spapr_ovec_cleanup(ov1_guest);

    /*
     * Ensure the guest asks for an interrupt mode we support;
     * otherwise terminate the boot.
     */
    if (guest_xive) {
        if (!spapr->irq->xive) {
            error_report(
"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property");
            exit(EXIT_FAILURE);
        }
    } else {
        if (!spapr->irq->xics) {
            error_report(
"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual");
            exit(EXIT_FAILURE);
        }
    }

    spapr_irq_update_active_intc(spapr);

    spapr_handle_transient_dev_before_cas(spapr);

    if (!spapr->cas_reboot) {
        void *fdt;
        SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };

        /*
         * If spapr_machine_reset() did not set up a HPT but one is
         * necessary (because the guest isn't going to use radix) then
         * set it up here.
         */
        if ((spapr->patb_entry & PATE1_GR) && !guest_radix) {
            /* legacy hash or new hash: */
            spapr_setup_hpt(spapr);
        }

        if (fdt_bufsize < sizeof(hdr)) {
            error_report("SLOF provided insufficient CAS buffer "
                         TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr));
            exit(EXIT_FAILURE);
        }

        fdt_bufsize -= sizeof(hdr);

        fdt = spapr_build_fdt(spapr, false, fdt_bufsize);
        _FDT((fdt_pack(fdt)));

        cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr));
        cpu_physical_memory_write(fdt_buf + sizeof(hdr), fdt,
                                  fdt_totalsize(fdt));
        trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));

        g_free(spapr->fdt_blob);
        spapr->fdt_size = fdt_totalsize(fdt);
        spapr->fdt_initial_size = spapr->fdt_size;
        spapr->fdt_blob = fdt;
    }

    if (spapr->cas_reboot) {
        qemu_system_reset_request(SHUTDOWN_CAUSE_SUBSYSTEM_RESET);
    }

    return H_SUCCESS;
}

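/*
 * Informal sketch of the CAS response buffer written above (layout
 * follows from the two cpu_physical_memory_write() calls, not from the
 * original comments):
 *
 *     fdt_buf + 0:            SpaprDeviceTreeUpdateHeader (version_id)
 *     fdt_buf + sizeof(hdr):  packed flattened device tree
 *
 * SLOF is expected to unpack this blob and hand the updated device
 * tree to the guest kernel in place of the boot-time one.
 */
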
static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    PowerPCCPU *tcpu;
    int idx;

    /* only support procno from H_REGISTER_VPA */
    if (flags != 0x1) {
        return H_FUNCTION;
    }

    tcpu = spapr_find_cpu(procno);
    if (tcpu == NULL) {
        return H_P2;
    }

    /* sequence is the same as in the "ibm,associativity" property */

    idx = 0;
#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))
    args[idx++] = ASSOCIATIVITY(0, 0);
    args[idx++] = ASSOCIATIVITY(0, tcpu->node_id);
    args[idx++] = ASSOCIATIVITY(procno, -1);
    for ( ; idx < 6; idx++) {
        args[idx] = -1;
    }
#undef ASSOCIATIVITY

    return H_SUCCESS;
}

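/*
 * Informal note: the values returned above mirror the target vCPU's
 * "ibm,associativity" device-tree property, packed two 32-bit
 * reference points per 64-bit return register (args[0..5] presumably
 * landing in r4..r9 per the PAPR hcall return convention). Unused
 * slots are filled with -1.
 */
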
static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS &
                               ~H_CPU_CHAR_THR_RECONF_TRIG;
    uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY;
    uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC);
    uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC);
    uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS);
    uint8_t count_cache_flush_assist = spapr_get_cap(spapr,
                                                     SPAPR_CAP_CCF_ASSIST);

    switch (safe_cache) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30;
        characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2;
        characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV;
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_cache == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR;
        break;
    }

    switch (safe_bounds_check) {
    case SPAPR_CAP_WORKAROUND:
        characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31;
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    case SPAPR_CAP_FIXED:
        break;
    default: /* broken */
        assert(safe_bounds_check == SPAPR_CAP_BROKEN);
        behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
        break;
    }

    switch (safe_indirect_branch) {
    case SPAPR_CAP_FIXED_NA:
        break;
    case SPAPR_CAP_FIXED_CCD:
        characteristics |= H_CPU_CHAR_CACHE_COUNT_DIS;
        break;
    case SPAPR_CAP_FIXED_IBS:
        characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED;
        break;
    case SPAPR_CAP_WORKAROUND:
        behaviour |= H_CPU_BEHAV_FLUSH_COUNT_CACHE;
        if (count_cache_flush_assist) {
            characteristics |= H_CPU_CHAR_BCCTR_FLUSH_ASSIST;
        }
        break;
    default: /* broken */
        assert(safe_indirect_branch == SPAPR_CAP_BROKEN);
        break;
    }

    args[0] = characteristics;
    args[1] = behaviour;
    return H_SUCCESS;
}

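/*
 * Informal background (not from the original source): this hcall backs
 * the guest's speculative-execution vulnerability reporting. The
 * "characteristics" word advertises which mitigation primitives exist
 * (L1D flush instructions, count-cache disable/flush, ori barriers),
 * while "behaviour" tells the guest which of them it should actually
 * use, e.g. H_CPU_BEHAV_L1D_FLUSH_PR for flushing on kernel-to-user
 * transitions. The spapr_get_cap() values are believed to come from
 * the machine's cap-cfpc/cap-sbbc/cap-ibs/cap-ccf-assist properties.
 */
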
static target_ulong h_update_dt(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dt = ppc64_phys_to_real(args[0]);
    struct fdt_header hdr = { 0 };
    unsigned cb;
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    void *fdt;

    cpu_physical_memory_read(dt, &hdr, sizeof(hdr));
    cb = fdt32_to_cpu(hdr.totalsize);

    if (!smc->update_dt_enabled) {
        return H_SUCCESS;
    }

    /* Check that the fdt did not grow out of proportion */
    if (cb > spapr->fdt_initial_size * 2) {
        trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb,
                                          fdt32_to_cpu(hdr.magic));
        return H_PARAMETER;
    }

    fdt = g_malloc0(cb);
    cpu_physical_memory_read(dt, fdt, cb);

    /* Check the fdt consistency */
    if (fdt_check_full(fdt, cb)) {
        trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb,
                                           fdt32_to_cpu(hdr.magic));
        g_free(fdt); /* don't leak the rejected copy */
        return H_PARAMETER;
    }

    g_free(spapr->fdt_blob);
    spapr->fdt_size = cb;
    spapr->fdt_blob = fdt;
    trace_spapr_update_dt(cb);

    return H_SUCCESS;
}

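/*
 * Informal note: the "cb > spapr->fdt_initial_size * 2" guard above
 * bounds how much guest-controlled memory h_update_dt will buffer; a
 * blob that has more than doubled since boot is treated as suspect
 * rather than copied in. fdt_check_full() then validates the structure
 * before the new blob replaces the one QEMU would re-serve or migrate.
 */
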
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else if (opcode >= SVM_HCALL_BASE && opcode <= SVM_HCALL_MAX) {
        /* we only have SVM-related hcall numbers assigned in multiples of 4 */
        assert((opcode & 0x3) == 0);

        slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

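/*
 * Informal usage note: handlers are registered once at startup (see
 * hypercall_register_types() below), e.g.:
 *
 *     spapr_register_hypercall(H_CEDE, h_cede);
 *
 * PAPR assigns hcall numbers in multiples of 4, which is why the
 * tables are indexed by opcode / 4 and the asserts reject unaligned
 * opcodes as well as double registration.
 */
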
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= SVM_HCALL_BASE) &&
               (opcode <= SVM_HCALL_MAX)) {
        spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

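/*
 * Informal guest-side sketch (assumption, for illustration only): a
 * PAPR guest reaches spapr_hypercall() by loading the hcall number
 * into r3 and arguments into r4 onwards, then executing "sc 1":
 *
 *     li   r3, 0xE0      # H_CEDE, number per PAPR
 *     sc   1             # hypervisor syscall
 *     # on return, r3 holds H_SUCCESS or an H_* error code
 *
 * Under KVM the kernel is believed to forward unhandled hcalls here
 * via the KVM_EXIT_PAPR_HCALL exit; under TCG the level-1 system call
 * trap is routed here directly.
 */
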
static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-hpt-resize */
    spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare);
    spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);
    spapr_register_hypercall(H_CONFER, h_confer);
    spapr_register_hypercall(H_PROD, h_prod);

    /* hcall-join */
    spapr_register_hypercall(H_JOIN, h_join);

    spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* In Memory Table MMU h-calls */
    spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb);
    spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid);
    spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table);

    /* hcall-get-cpu-characteristics */
    spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS,
                             h_get_cpu_characteristics);

    /*
     * "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly.
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);

    spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);

    /* Virtual Processor Home Node */
    spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
                             h_home_node_associativity);
}

type_init(hypercall_register_types)