blob: 9ea941c6302f0b89775dd952cc8b955150d2655e [file] [log] [blame]
Yoshinori Satoe5918d72019-01-21 05:24:40 -08001/*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "qemu/osdep.h"
20#include "qemu/bswap.h"
21#include "qemu/qemu-print.h"
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "tcg/tcg-op.h"
25#include "exec/cpu_ldst.h"
26#include "exec/helper-proto.h"
27#include "exec/helper-gen.h"
28#include "exec/translator.h"
29#include "trace-tcg.h"
30#include "exec/log.h"
31
32typedef struct DisasContext {
33 DisasContextBase base;
34 CPURXState *env;
35 uint32_t pc;
36} DisasContext;
37
38typedef struct DisasCompare {
39 TCGv value;
40 TCGv temp;
41 TCGCond cond;
42} DisasCompare;
43
/*
 * Return the assembler mnemonic for control register number @cr.
 * Unassigned numbers inside the bank decode to "", anything out of
 * range to "illegal".
 */
const char *rx_crname(uint8_t cr)
{
    static const char *const names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };

    return cr < sizeof(names) / sizeof(names[0]) ? names[cr] : "illegal";
}
Yoshinori Satoe5918d72019-01-21 05:24:40 -080055
56/* Target-specific values for dc->base.is_jmp. */
57#define DISAS_JUMP DISAS_TARGET_0
58#define DISAS_UPDATE DISAS_TARGET_1
59#define DISAS_EXIT DISAS_TARGET_2
60
61/* global register indexes */
62static TCGv cpu_regs[16];
63static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
64static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
65static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
66static TCGv cpu_fintv, cpu_intb, cpu_pc;
67static TCGv_i64 cpu_acc;
68
69#define cpu_sp cpu_regs[0]
70
71#include "exec/gen-icount.h"
72
73/* decoder helper */
74static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
75 int i, int n)
76{
77 while (++i <= n) {
78 uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
79 insn |= b << (32 - i * 8);
80 }
81 return insn;
82}
83
84static uint32_t li(DisasContext *ctx, int sz)
85{
86 int32_t tmp, addr;
87 CPURXState *env = ctx->env;
88 addr = ctx->base.pc_next;
89
90 tcg_debug_assert(sz < 4);
91 switch (sz) {
92 case 1:
93 ctx->base.pc_next += 1;
94 return cpu_ldsb_code(env, addr);
95 case 2:
96 ctx->base.pc_next += 2;
97 return cpu_ldsw_code(env, addr);
98 case 3:
99 ctx->base.pc_next += 3;
100 tmp = cpu_ldsb_code(env, addr + 2) << 16;
101 tmp |= cpu_lduw_code(env, addr) & 0xffff;
102 return tmp;
103 case 0:
104 ctx->base.pc_next += 4;
105 return cpu_ldl_code(env, addr);
106 }
107 return 0;
108}
109
110static int bdsp_s(DisasContext *ctx, int d)
111{
112 /*
113 * 0 -> 8
114 * 1 -> 9
115 * 2 -> 10
116 * 3 -> 3
117 * :
118 * 7 -> 7
119 */
120 if (d < 3) {
121 d += 8;
122 }
123 return d;
124}
125
126/* Include the auto-generated decoder. */
Paolo Bonziniabff1ab2020-08-07 12:10:23 +0200127#include "decode-insns.c.inc"
Yoshinori Satoe5918d72019-01-21 05:24:40 -0800128
129void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
130{
Eduardo Habkost38688fd2020-08-25 15:20:48 -0400131 RXCPU *cpu = RX_CPU(cs);
Yoshinori Satoe5918d72019-01-21 05:24:40 -0800132 CPURXState *env = &cpu->env;
133 int i;
134 uint32_t psw;
135
136 psw = rx_cpu_pack_psw(env);
137 qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
138 env->pc, psw);
139 for (i = 0; i < 16; i += 4) {
140 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
141 i, env->regs[i], i + 1, env->regs[i + 1],
142 i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
143 }
144}
145
146static bool use_goto_tb(DisasContext *dc, target_ulong dest)
147{
148 if (unlikely(dc->base.singlestep_enabled)) {
149 return false;
150 } else {
151 return true;
152 }
153}
154
155static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
156{
157 if (use_goto_tb(dc, dest)) {
158 tcg_gen_goto_tb(n);
159 tcg_gen_movi_i32(cpu_pc, dest);
160 tcg_gen_exit_tb(dc->base.tb, n);
161 } else {
162 tcg_gen_movi_i32(cpu_pc, dest);
163 if (dc->base.singlestep_enabled) {
164 gen_helper_debug(cpu_env);
165 } else {
166 tcg_gen_lookup_and_goto_ptr();
167 }
168 }
169 dc->base.is_jmp = DISAS_NORETURN;
170}
171
172/* generic load wrapper */
173static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
174{
175 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
176}
177
178/* unsigned load wrapper */
179static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
180{
181 tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
182}
183
184/* generic store wrapper */
185static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
186{
187 tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
188}
189
190/* [ri, rb] */
191static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
192 int size, int ri, int rb)
193{
194 tcg_gen_shli_i32(mem, cpu_regs[ri], size);
195 tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
196}
197
198/* dsp[reg] */
199static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
200 int ld, int size, int reg)
201{
202 uint32_t dsp;
203
204 tcg_debug_assert(ld < 3);
205 switch (ld) {
206 case 0:
207 return cpu_regs[reg];
208 case 1:
209 dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
210 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
211 ctx->base.pc_next += 1;
212 return mem;
213 case 2:
214 dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
215 tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
216 ctx->base.pc_next += 2;
217 return mem;
218 }
219 return NULL;
220}
221
222static inline MemOp mi_to_mop(unsigned mi)
223{
224 static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
225 tcg_debug_assert(mi < 5);
226 return mop[mi];
227}
228
229/* load source operand */
230static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
231 int ld, int mi, int rs)
232{
233 TCGv addr;
234 MemOp mop;
235 if (ld < 3) {
236 mop = mi_to_mop(mi);
237 addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
238 tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
239 return mem;
240 } else {
241 return cpu_regs[rs];
242 }
243}
244
245/* Processor mode check */
246static int is_privileged(DisasContext *ctx, int is_exception)
247{
248 if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) {
249 if (is_exception) {
250 gen_helper_raise_privilege_violation(cpu_env);
251 }
252 return 0;
253 } else {
254 return 1;
255 }
256}
257
258/* generate QEMU condition */
259static void psw_cond(DisasCompare *dc, uint32_t cond)
260{
261 tcg_debug_assert(cond < 16);
262 switch (cond) {
263 case 0: /* z */
264 dc->cond = TCG_COND_EQ;
265 dc->value = cpu_psw_z;
266 break;
267 case 1: /* nz */
268 dc->cond = TCG_COND_NE;
269 dc->value = cpu_psw_z;
270 break;
271 case 2: /* c */
272 dc->cond = TCG_COND_NE;
273 dc->value = cpu_psw_c;
274 break;
275 case 3: /* nc */
276 dc->cond = TCG_COND_EQ;
277 dc->value = cpu_psw_c;
278 break;
279 case 4: /* gtu (C& ~Z) == 1 */
280 case 5: /* leu (C& ~Z) == 0 */
281 tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
282 tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
283 dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
284 dc->value = dc->temp;
285 break;
286 case 6: /* pz (S == 0) */
287 dc->cond = TCG_COND_GE;
288 dc->value = cpu_psw_s;
289 break;
290 case 7: /* n (S == 1) */
291 dc->cond = TCG_COND_LT;
292 dc->value = cpu_psw_s;
293 break;
294 case 8: /* ge (S^O)==0 */
295 case 9: /* lt (S^O)==1 */
296 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
297 dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
298 dc->value = dc->temp;
299 break;
300 case 10: /* gt ((S^O)|Z)==0 */
301 case 11: /* le ((S^O)|Z)==1 */
302 tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
303 tcg_gen_sari_i32(dc->temp, dc->temp, 31);
304 tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
305 dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
306 dc->value = dc->temp;
307 break;
308 case 12: /* o */
309 dc->cond = TCG_COND_LT;
310 dc->value = cpu_psw_o;
311 break;
312 case 13: /* no */
313 dc->cond = TCG_COND_GE;
314 dc->value = cpu_psw_o;
315 break;
316 case 14: /* always true */
317 dc->cond = TCG_COND_ALWAYS;
318 dc->value = dc->temp;
319 break;
320 case 15: /* always false */
321 dc->cond = TCG_COND_NEVER;
322 dc->value = dc->temp;
323 break;
324 }
325}
326
327static void move_from_cr(TCGv ret, int cr, uint32_t pc)
328{
329 TCGv z = tcg_const_i32(0);
330 switch (cr) {
331 case 0: /* PSW */
332 gen_helper_pack_psw(ret, cpu_env);
333 break;
334 case 1: /* PC */
335 tcg_gen_movi_i32(ret, pc);
336 break;
337 case 2: /* USP */
338 tcg_gen_movcond_i32(TCG_COND_NE, ret,
339 cpu_psw_u, z, cpu_sp, cpu_usp);
340 break;
341 case 3: /* FPSW */
342 tcg_gen_mov_i32(ret, cpu_fpsw);
343 break;
344 case 8: /* BPSW */
345 tcg_gen_mov_i32(ret, cpu_bpsw);
346 break;
347 case 9: /* BPC */
348 tcg_gen_mov_i32(ret, cpu_bpc);
349 break;
350 case 10: /* ISP */
351 tcg_gen_movcond_i32(TCG_COND_EQ, ret,
352 cpu_psw_u, z, cpu_sp, cpu_isp);
353 break;
354 case 11: /* FINTV */
355 tcg_gen_mov_i32(ret, cpu_fintv);
356 break;
357 case 12: /* INTB */
358 tcg_gen_mov_i32(ret, cpu_intb);
359 break;
360 default:
361 qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
362 /* Unimplement registers return 0 */
363 tcg_gen_movi_i32(ret, 0);
364 break;
365 }
366 tcg_temp_free(z);
367}
368
369static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
370{
371 TCGv z;
372 if (cr >= 8 && !is_privileged(ctx, 0)) {
373 /* Some control registers can only be written in privileged mode. */
374 qemu_log_mask(LOG_GUEST_ERROR,
Yoshinori Sato27a4a302019-01-21 05:23:56 -0800375 "disallow control register write %s", rx_crname(cr));
Yoshinori Satoe5918d72019-01-21 05:24:40 -0800376 return;
377 }
378 z = tcg_const_i32(0);
379 switch (cr) {
380 case 0: /* PSW */
381 gen_helper_set_psw(cpu_env, val);
382 break;
383 /* case 1: to PC not supported */
384 case 2: /* USP */
385 tcg_gen_mov_i32(cpu_usp, val);
386 tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp,
387 cpu_psw_u, z, cpu_usp, cpu_sp);
388 break;
389 case 3: /* FPSW */
390 gen_helper_set_fpsw(cpu_env, val);
391 break;
392 case 8: /* BPSW */
393 tcg_gen_mov_i32(cpu_bpsw, val);
394 break;
395 case 9: /* BPC */
396 tcg_gen_mov_i32(cpu_bpc, val);
397 break;
398 case 10: /* ISP */
399 tcg_gen_mov_i32(cpu_isp, val);
400 /* if PSW.U is 0, copy isp to r0 */
401 tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp,
402 cpu_psw_u, z, cpu_isp, cpu_sp);
403 break;
404 case 11: /* FINTV */
405 tcg_gen_mov_i32(cpu_fintv, val);
406 break;
407 case 12: /* INTB */
408 tcg_gen_mov_i32(cpu_intb, val);
409 break;
410 default:
411 qemu_log_mask(LOG_GUEST_ERROR,
412 "Unimplement control register %d", cr);
413 break;
414 }
415 tcg_temp_free(z);
416}
417
418static void push(TCGv val)
419{
420 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
421 rx_gen_st(MO_32, val, cpu_sp);
422}
423
424static void pop(TCGv ret)
425{
426 rx_gen_ld(MO_32, ret, cpu_sp);
427 tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
428}
429
430/* mov.<bwl> rs,dsp5[rd] */
431static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
432{
433 TCGv mem;
434 mem = tcg_temp_new();
435 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
436 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
437 tcg_temp_free(mem);
438 return true;
439}
440
441/* mov.<bwl> dsp5[rs],rd */
442static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
443{
444 TCGv mem;
445 mem = tcg_temp_new();
446 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
447 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
448 tcg_temp_free(mem);
449 return true;
450}
451
452/* mov.l #uimm4,rd */
453/* mov.l #uimm8,rd */
454/* mov.l #imm,rd */
455static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
456{
457 tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
458 return true;
459}
460
461/* mov.<bwl> #uimm8,dsp[rd] */
462/* mov.<bwl> #imm, dsp[rd] */
463static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
464{
465 TCGv imm, mem;
466 imm = tcg_const_i32(a->imm);
467 mem = tcg_temp_new();
468 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
469 rx_gen_st(a->sz, imm, mem);
470 tcg_temp_free(imm);
471 tcg_temp_free(mem);
472 return true;
473}
474
475/* mov.<bwl> [ri,rb],rd */
476static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
477{
478 TCGv mem;
479 mem = tcg_temp_new();
480 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
481 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
482 tcg_temp_free(mem);
483 return true;
484}
485
486/* mov.<bwl> rd,[ri,rb] */
487static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
488{
489 TCGv mem;
490 mem = tcg_temp_new();
491 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
492 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
493 tcg_temp_free(mem);
494 return true;
495}
496
497/* mov.<bwl> dsp[rs],dsp[rd] */
498/* mov.<bwl> rs,dsp[rd] */
499/* mov.<bwl> dsp[rs],rd */
500/* mov.<bwl> rs,rd */
501static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
502{
503 static void (* const mov[])(TCGv ret, TCGv arg) = {
504 tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
505 };
506 TCGv tmp, mem, addr;
507 if (a->lds == 3 && a->ldd == 3) {
508 /* mov.<bwl> rs,rd */
509 mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
510 return true;
511 }
512
513 mem = tcg_temp_new();
514 if (a->lds == 3) {
515 /* mov.<bwl> rs,dsp[rd] */
516 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
517 rx_gen_st(a->sz, cpu_regs[a->rd], addr);
518 } else if (a->ldd == 3) {
519 /* mov.<bwl> dsp[rs],rd */
520 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
521 rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
522 } else {
523 /* mov.<bwl> dsp[rs],dsp[rd] */
524 tmp = tcg_temp_new();
525 addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
526 rx_gen_ld(a->sz, tmp, addr);
527 addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
528 rx_gen_st(a->sz, tmp, addr);
529 tcg_temp_free(tmp);
530 }
531 tcg_temp_free(mem);
532 return true;
533}
534
535/* mov.<bwl> rs,[rd+] */
536/* mov.<bwl> rs,[-rd] */
537static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
538{
539 TCGv val;
540 val = tcg_temp_new();
541 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
542 if (a->ad == 1) {
543 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
544 }
545 rx_gen_st(a->sz, val, cpu_regs[a->rd]);
546 if (a->ad == 0) {
547 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
548 }
549 tcg_temp_free(val);
550 return true;
551}
552
553/* mov.<bwl> [rd+],rs */
554/* mov.<bwl> [-rd],rs */
555static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
556{
557 TCGv val;
558 val = tcg_temp_new();
559 if (a->ad == 1) {
560 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
561 }
562 rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
563 if (a->ad == 0) {
564 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
565 }
566 tcg_gen_mov_i32(cpu_regs[a->rs], val);
567 tcg_temp_free(val);
568 return true;
569}
570
571/* movu.<bw> dsp5[rs],rd */
572/* movu.<bw> dsp[rs],rd */
573static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
574{
575 TCGv mem;
576 mem = tcg_temp_new();
577 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
578 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
579 tcg_temp_free(mem);
580 return true;
581}
582
583/* movu.<bw> rs,rd */
584static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
585{
586 static void (* const ext[])(TCGv ret, TCGv arg) = {
587 tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
588 };
589 ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
590 return true;
591}
592
593/* movu.<bw> [ri,rb],rd */
594static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
595{
596 TCGv mem;
597 mem = tcg_temp_new();
598 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
599 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
600 tcg_temp_free(mem);
601 return true;
602}
603
604/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
606static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
607{
608 TCGv val;
609 val = tcg_temp_new();
610 if (a->ad == 1) {
611 tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
612 }
613 rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
614 if (a->ad == 0) {
615 tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
616 }
617 tcg_gen_mov_i32(cpu_regs[a->rs], val);
618 tcg_temp_free(val);
619 return true;
620}
621
622
623/* pop rd */
624static bool trans_POP(DisasContext *ctx, arg_POP *a)
625{
626 /* mov.l [r0+], rd */
627 arg_MOV_rp mov_a;
628 mov_a.rd = 0;
629 mov_a.rs = a->rd;
630 mov_a.ad = 0;
631 mov_a.sz = MO_32;
632 trans_MOV_pr(ctx, &mov_a);
633 return true;
634}
635
636/* popc cr */
637static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
638{
639 TCGv val;
640 val = tcg_temp_new();
641 pop(val);
642 move_to_cr(ctx, val, a->cr);
643 if (a->cr == 0 && is_privileged(ctx, 0)) {
644 /* PSW.I may be updated here. exit TB. */
645 ctx->base.is_jmp = DISAS_UPDATE;
646 }
647 tcg_temp_free(val);
648 return true;
649}
650
651/* popm rd-rd2 */
652static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
653{
654 int r;
655 if (a->rd == 0 || a->rd >= a->rd2) {
656 qemu_log_mask(LOG_GUEST_ERROR,
657 "Invalid register ranges r%d-r%d", a->rd, a->rd2);
658 }
659 r = a->rd;
660 while (r <= a->rd2 && r < 16) {
661 pop(cpu_regs[r++]);
662 }
663 return true;
664}
665
666
667/* push.<bwl> rs */
668static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
669{
670 TCGv val;
671 val = tcg_temp_new();
672 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
673 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
674 rx_gen_st(a->sz, val, cpu_sp);
675 tcg_temp_free(val);
676 return true;
677}
678
679/* push.<bwl> dsp[rs] */
680static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
681{
682 TCGv mem, val, addr;
683 mem = tcg_temp_new();
684 val = tcg_temp_new();
685 addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
686 rx_gen_ld(a->sz, val, addr);
687 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
688 rx_gen_st(a->sz, val, cpu_sp);
689 tcg_temp_free(mem);
690 tcg_temp_free(val);
691 return true;
692}
693
694/* pushc rx */
695static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
696{
697 TCGv val;
698 val = tcg_temp_new();
699 move_from_cr(val, a->cr, ctx->pc);
700 push(val);
701 tcg_temp_free(val);
702 return true;
703}
704
705/* pushm rs-rs2 */
706static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
707{
708 int r;
709
710 if (a->rs == 0 || a->rs >= a->rs2) {
711 qemu_log_mask(LOG_GUEST_ERROR,
712 "Invalid register ranges r%d-r%d", a->rs, a->rs2);
713 }
714 r = a->rs2;
715 while (r >= a->rs && r >= 0) {
716 push(cpu_regs[r--]);
717 }
718 return true;
719}
720
721/* xchg rs,rd */
722static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
723{
724 TCGv tmp;
725 tmp = tcg_temp_new();
726 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
727 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
728 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
729 tcg_temp_free(tmp);
730 return true;
731}
732
733/* xchg dsp[rs].<mi>,rd */
734static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
735{
736 TCGv mem, addr;
737 mem = tcg_temp_new();
738 switch (a->mi) {
739 case 0: /* dsp[rs].b */
740 case 1: /* dsp[rs].w */
741 case 2: /* dsp[rs].l */
742 addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
743 break;
744 case 3: /* dsp[rs].uw */
745 case 4: /* dsp[rs].ub */
746 addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
747 break;
748 default:
749 g_assert_not_reached();
750 }
751 tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
752 0, mi_to_mop(a->mi));
753 tcg_temp_free(mem);
754 return true;
755}
756
757static inline void stcond(TCGCond cond, int rd, int imm)
758{
759 TCGv z;
760 TCGv _imm;
761 z = tcg_const_i32(0);
762 _imm = tcg_const_i32(imm);
763 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
764 _imm, cpu_regs[rd]);
765 tcg_temp_free(z);
766 tcg_temp_free(_imm);
767}
768
769/* stz #imm,rd */
770static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
771{
772 stcond(TCG_COND_EQ, a->rd, a->imm);
773 return true;
774}
775
776/* stnz #imm,rd */
777static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
778{
779 stcond(TCG_COND_NE, a->rd, a->imm);
780 return true;
781}
782
783/* sccnd.<bwl> rd */
784/* sccnd.<bwl> dsp:[rd] */
785static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
786{
787 DisasCompare dc;
788 TCGv val, mem, addr;
789 dc.temp = tcg_temp_new();
790 psw_cond(&dc, a->cd);
791 if (a->ld < 3) {
792 val = tcg_temp_new();
793 mem = tcg_temp_new();
794 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
795 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
796 rx_gen_st(a->sz, val, addr);
797 tcg_temp_free(val);
798 tcg_temp_free(mem);
799 } else {
800 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
801 }
802 tcg_temp_free(dc.temp);
803 return true;
804}
805
806/* rtsd #imm */
807static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
808{
809 tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
810 pop(cpu_pc);
811 ctx->base.is_jmp = DISAS_JUMP;
812 return true;
813}
814
815/* rtsd #imm, rd-rd2 */
816static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
817{
818 int dst;
819 int adj;
820
821 if (a->rd2 >= a->rd) {
822 adj = a->imm - (a->rd2 - a->rd + 1);
823 } else {
824 adj = a->imm - (15 - a->rd + 1);
825 }
826
827 tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
828 dst = a->rd;
829 while (dst <= a->rd2 && dst < 16) {
830 pop(cpu_regs[dst++]);
831 }
832 pop(cpu_pc);
833 ctx->base.is_jmp = DISAS_JUMP;
834 return true;
835}
836
837typedef void (*op2fn)(TCGv ret, TCGv arg1);
838typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
839
840static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
841{
842 opr(cpu_regs[dst], cpu_regs[src]);
843}
844
845static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
846{
847 opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
848}
849
850static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
851{
852 TCGv imm = tcg_const_i32(src2);
853 opr(cpu_regs[dst], cpu_regs[src], imm);
854 tcg_temp_free(imm);
855}
856
857static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
858 int dst, int src, int ld, int mi)
859{
860 TCGv val, mem;
861 mem = tcg_temp_new();
862 val = rx_load_source(ctx, mem, ld, mi, src);
863 opr(cpu_regs[dst], cpu_regs[dst], val);
864 tcg_temp_free(mem);
865}
866
867static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
868{
869 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
870 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
871 tcg_gen_mov_i32(ret, cpu_psw_s);
872}
873
874/* and #uimm:4, rd */
875/* and #imm, rd */
876static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
877{
878 rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
879 return true;
880}
881
882/* and dsp[rs], rd */
883/* and rs,rd */
884static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
885{
886 rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
887 return true;
888}
889
890/* and rs,rs2,rd */
891static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
892{
893 rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
894 return true;
895}
896
897static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
898{
899 tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
900 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
901 tcg_gen_mov_i32(ret, cpu_psw_s);
902}
903
904/* or #uimm:4, rd */
905/* or #imm, rd */
906static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
907{
908 rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
909 return true;
910}
911
912/* or dsp[rs], rd */
913/* or rs,rd */
914static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
915{
916 rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
917 return true;
918}
919
920/* or rs,rs2,rd */
921static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
922{
923 rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
924 return true;
925}
926
927static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
928{
929 tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
930 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
931 tcg_gen_mov_i32(ret, cpu_psw_s);
932}
933
934/* xor #imm, rd */
935static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
936{
937 rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
938 return true;
939}
940
941/* xor dsp[rs], rd */
942/* xor rs,rd */
943static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
944{
945 rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
946 return true;
947}
948
949static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
950{
951 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
952 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
953}
954
955/* tst #imm, rd */
956static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
957{
958 rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
959 return true;
960}
961
962/* tst dsp[rs], rd */
963/* tst rs, rd */
964static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
965{
966 rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
967 return true;
968}
969
970static void rx_not(TCGv ret, TCGv arg1)
971{
972 tcg_gen_not_i32(ret, arg1);
973 tcg_gen_mov_i32(cpu_psw_z, ret);
974 tcg_gen_mov_i32(cpu_psw_s, ret);
975}
976
977/* not rd */
978/* not rs, rd */
979static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
980{
981 rx_gen_op_rr(rx_not, a->rd, a->rs);
982 return true;
983}
984
985static void rx_neg(TCGv ret, TCGv arg1)
986{
987 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
988 tcg_gen_neg_i32(ret, arg1);
989 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
990 tcg_gen_mov_i32(cpu_psw_z, ret);
991 tcg_gen_mov_i32(cpu_psw_s, ret);
992}
993
994
995/* neg rd */
996/* neg rs, rd */
997static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
998{
999 rx_gen_op_rr(rx_neg, a->rd, a->rs);
1000 return true;
1001}
1002
1003/* ret = arg1 + arg2 + psw_c */
1004static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
1005{
1006 TCGv z;
1007 z = tcg_const_i32(0);
1008 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
1009 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
1010 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1011 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1012 tcg_gen_xor_i32(z, arg1, arg2);
1013 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
1014 tcg_gen_mov_i32(ret, cpu_psw_s);
1015 tcg_temp_free(z);
1016}
1017
1018/* adc #imm, rd */
1019static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
1020{
1021 rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
1022 return true;
1023}
1024
1025/* adc rs, rd */
1026static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
1027{
1028 rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
1029 return true;
1030}
1031
1032/* adc dsp[rs], rd */
1033static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
1034{
1035 /* mi only 2 */
1036 if (a->mi != 2) {
1037 return false;
1038 }
1039 rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
1040 return true;
1041}
1042
1043/* ret = arg1 + arg2 */
1044static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
1045{
1046 TCGv z;
1047 z = tcg_const_i32(0);
1048 tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
1049 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1050 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1051 tcg_gen_xor_i32(z, arg1, arg2);
1052 tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
1053 tcg_gen_mov_i32(ret, cpu_psw_s);
1054 tcg_temp_free(z);
1055}
1056
1057/* add #uimm4, rd */
1058/* add #imm, rs, rd */
1059static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
1060{
1061 rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
1062 return true;
1063}
1064
1065/* add rs, rd */
1066/* add dsp[rs], rd */
1067static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
1068{
1069 rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
1070 return true;
1071}
1072
1073/* add rs, rs2, rd */
1074static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
1075{
1076 rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
1077 return true;
1078}
1079
1080/* ret = arg1 - arg2 */
1081static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
1082{
1083 TCGv temp;
1084 tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
1085 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
1086 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
1087 tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
1088 temp = tcg_temp_new_i32();
1089 tcg_gen_xor_i32(temp, arg1, arg2);
1090 tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
1091 tcg_temp_free_i32(temp);
Lichang Zhao97841432020-10-09 14:44:39 +08001092 /* CMP not required return */
Yoshinori Satoe5918d72019-01-21 05:24:40 -08001093 if (ret) {
1094 tcg_gen_mov_i32(ret, cpu_psw_s);
1095 }
1096}
1097static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
1098{
1099 rx_sub(NULL, arg1, arg2);
1100}
1101/* ret = arg1 - arg2 - !psw_c */
1102/* -> ret = arg1 + ~arg2 + psw_c */
1103static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
1104{
1105 TCGv temp;
1106 temp = tcg_temp_new();
1107 tcg_gen_not_i32(temp, arg2);
1108 rx_adc(ret, arg1, temp);
1109 tcg_temp_free(temp);
1110}
1111
1112/* cmp #imm4, rs2 */
1113/* cmp #imm8, rs2 */
1114/* cmp #imm, rs2 */
1115static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
1116{
1117 rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
1118 return true;
1119}
1120
1121/* cmp rs, rs2 */
1122/* cmp dsp[rs], rs2 */
1123static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
1124{
1125 rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
1126 return true;
1127}
1128
1129/* sub #imm4, rd */
1130static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
1131{
1132 rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
1133 return true;
1134}
1135
1136/* sub rs, rd */
1137/* sub dsp[rs], rd */
1138static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
1139{
1140 rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
1141 return true;
1142}
1143
1144/* sub rs2, rs, rd */
1145static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
1146{
1147 rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
1148 return true;
1149}
1150
1151/* sbb rs, rd */
1152static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
1153{
1154 rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
1155 return true;
1156}
1157
1158/* sbb dsp[rs], rd */
1159static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
1160{
1161 /* mi only 2 */
1162 if (a->mi != 2) {
1163 return false;
1164 }
1165 rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
1166 return true;
1167}
1168
1169static void rx_abs(TCGv ret, TCGv arg1)
1170{
1171 TCGv neg;
1172 TCGv zero;
1173 neg = tcg_temp_new();
1174 zero = tcg_const_i32(0);
1175 tcg_gen_neg_i32(neg, arg1);
1176 tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
1177 tcg_temp_free(neg);
1178 tcg_temp_free(zero);
1179}
1180
1181/* abs rd */
1182/* abs rs, rd */
1183static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
1184{
1185 rx_gen_op_rr(rx_abs, a->rd, a->rs);
1186 return true;
1187}
1188
1189/* max #imm, rd */
1190static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
1191{
1192 rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
1193 return true;
1194}
1195
1196/* max rs, rd */
1197/* max dsp[rs], rd */
1198static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
1199{
1200 rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1201 return true;
1202}
1203
1204/* min #imm, rd */
1205static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
1206{
1207 rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
1208 return true;
1209}
1210
1211/* min rs, rd */
1212/* min dsp[rs], rd */
1213static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
1214{
1215 rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
1216 return true;
1217}
1218
1219/* mul #uimm4, rd */
1220/* mul #imm, rd */
1221static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
1222{
1223 rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
1224 return true;
1225}
1226
/* mul rs, rd */
/* mul dsp[rs], rd */
/* rd = low 32 bits of rd * source operand. */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1234
/* mul rs, rs2, rd */
/* Three-operand form: rd = low 32 bits of rs * rs2. */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1241
/* emul #imm, rd */
/* Extended signed multiply: 64-bit product of rd * imm, low word in
   rd, high word in rd+1. */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        /* rd == 15 is not a valid destination pair; log but continue —
           (rd + 1) & 15 then wraps the high word into R0. */
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}
1254
/* emul rs, rd */
/* emul dsp[rs], rd */
/* Extended signed multiply with register/memory source; 64-bit
   product split across rd (low) and rd+1 (high). */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        /* rd == 15 is not a valid destination pair; log but continue. */
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    /* val may alias mem or a guest register; only mem is freed here,
       matching the other *_mr handlers in this file. */
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}
1270
/* emulu #imm, rd */
/* Extended unsigned multiply: 64-bit product of rd * imm, low word
   in rd, high word in rd+1. */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        /* rd == 15 is not a valid destination pair; log but continue. */
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}
1283
/* emulu rs, rd */
/* emulu dsp[rs], rd */
/* Extended unsigned multiply with register/memory source; 64-bit
   product split across rd (low) and rd+1 (high). */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        /* rd == 15 is not a valid destination pair; log but continue. */
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}
1299
/* Signed division via helper (handles divide-by-zero/overflow flags there). */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, cpu_env, arg1, arg2);
}
1304
/* Unsigned division via helper. */
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, cpu_env, arg1, arg2);
}
1309
/* div #imm, rd */
/* rd = rd / imm (signed, via helper). */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}
1316
/* div rs, rd */
/* div dsp[rs], rd */
/* rd = rd / source operand (signed, via helper). */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1324
/* divu #imm, rd */
/* rd = rd / imm (unsigned, via helper). */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}
1331
/* divu rs, rd */
/* divu dsp[rs], rd */
/* rd = rd / source operand (unsigned, via helper). */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1339
1340
1341/* shll #imm:5, rd */
1342/* shll #imm:5, rs2, rd */
1343static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
1344{
1345 TCGv tmp;
1346 tmp = tcg_temp_new();
1347 if (a->imm) {
1348 tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
1349 tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
1350 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
1351 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
1352 tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
1353 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
1354 } else {
1355 tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
1356 tcg_gen_movi_i32(cpu_psw_c, 0);
1357 tcg_gen_movi_i32(cpu_psw_o, 0);
1358 }
1359 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1360 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1361 return true;
1362}
1363
/* shll rs, rd */
/* Shift left logical by register count (masked to 0..31).  Flags are
   computed only when the count is non-zero; otherwise C and O clear. */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_const_i32(32);
    tmp = tcg_temp_new();
    /* count = 32 - (rs & 31); arithmetic-shift rd right by it so C
       holds the sign-replicated bits that will be shifted out. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, count, tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O set from whether the shifted-out bits were all-0 or all-1. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(count);
    tcg_temp_free(tmp);
    return true;
}
1397
/* Common right-shift-by-immediate for SHLR (alith == 0, logical) and
   SHAR (alith == 1, arithmetic).  Shifts by imm-1 first so the final
   single-bit shift exposes the last bit shifted out in C. */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Zero shift: plain move, C cleared. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1417
/* Common right-shift-by-register for SHLR/SHAR (alith selects logical
   vs arithmetic).  Count is masked to 0..31; shift by count-1 then by 1
   so the last bit shifted out lands in C.  Zero count only clears C. */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
    tcg_temp_free(count);
}
1450
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
/* Arithmetic shift right by immediate (alith == 1). */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}
1458
/* shar rs, rd */
/* Arithmetic shift right by register count (alith == 1). */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}
1465
/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
/* Logical shift right by immediate (alith == 0). */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}
1473
/* shlr rs, rd */
/* Logical shift right by register count (alith == 0). */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1480
/* rolc rd */
/* Rotate left through carry: bit 31 goes to C, old C enters bit 0. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    return true;
}
1495
1496/* rorc rd */
1497static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
1498{
1499 TCGv tmp;
1500 tmp = tcg_temp_new();
1501 tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
1502 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
1503 tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
1504 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
1505 tcg_gen_mov_i32(cpu_psw_c, tmp);
1506 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1507 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1508 return true;
1509}
1510
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/* Common rotate emitter: @dir selects direction, @ir selects whether
   @src is an immediate count or a register index.  C receives the bit
   rotated around the end; Z and S track the result. */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* After a left rotate the wrapped bit is in bit 0. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* After a right rotate the wrapped bit is in bit 31. */
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1536
/* rotl #imm, rd */
/* Rotate left by immediate. */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}
1543
/* rotl rs, rd */
/* Rotate left by register count. */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}
1550
/* rotr #imm, rd */
/* Rotate right by immediate. */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}
1557
/* rotr rs, rd */
/* Rotate right by register count. */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
1564
/* revl rs, rd */
/* Byte-reverse the whole longword: rd = bswap32(rs). */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
1571
/* revw rs, rd */
/* Swap the bytes within each 16-bit half independently:
   rd = ((rs & 0x00ff00ff) << 8) | ((rs >> 8) & 0x00ff00ff). */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_temp_free(tmp);
    return true;
}
1585
/* conditional branch helper */
/* Emit a conditional branch for condition code @cd to ctx->pc + @dst.
   cd 0..13 test a PSW condition; 14 is branch-always; 15 never taken. */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        /* Not-taken path falls through to the next insn (slot 0);
           taken path goes to the branch target (slot 1). */
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing do */
        break;
    }
}
1616
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
/* All conditional branches funnel through rx_bcnd_main. */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}
1631
/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
/* Unconditional relative branch: condition code 14 is always-true. */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
1641
/* bra rs */
/* Register-relative branch: pc = insn address + rd.  NOTE(review):
   the register field is named 'rd' by the decoder although it is a
   source here. */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1649
/* Push the address of the following instruction (return address). */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}
1656
/* jmp rs */
/* Indirect jump: pc = rs. */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1664
/* jsr rs */
/* Indirect call: push return address, then pc = rs. */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1673
/* bsr dsp:16 */
/* bsr dsp:24 */
/* Relative call: push return address, then branch-always to pc + dsp. */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}
1682
/* bsr rs */
/* Register-relative call: push return address, pc = insn address + rd. */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1691
/* rts */
/* Return from subroutine: pop the return address into pc. */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1699
/* nop */
/* No operation; emits nothing. */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}
1705
/* scmpu */
/* String compare; implemented entirely in the helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}
1712
/* smovu */
/* String move; implemented entirely in the helper. */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}
1719
/* smovf */
/* String move forward; implemented entirely in the helper. */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}
1726
/* smovb */
/* String move backward; implemented entirely in the helper. */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}
1733
/*
 * Invoke string-operation helper @op, passing the operand size field
 * (a->sz) as an explicit TCG argument.  Expects 'a' in scope at the
 * expansion site.
 */
#define STRING(op) \
    do { \
        TCGv size = tcg_const_i32(a->sz); \
        gen_helper_##op(cpu_env, size); \
        tcg_temp_free(size); \
    } while (0)
1740
/* suntil.<bwl> */
/* String search-until; size comes from the sz field via STRING(). */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}
1747
/* swhile.<bwl> */
/* String search-while; size comes from the sz field via STRING(). */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}
/* sstr.<bwl> */
/* String store; size comes from the sz field via STRING(). */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}
1760
/* rmpa.<bwl> */
/* Repeat multiply-accumulate; size comes from the sz field via STRING(). */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1767
/* Multiply the high 16-bit halves of rs and rs2 (signed) and place
   the product in bits 16..47 of the 64-bit result, for MULHI/MACHI. */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    /* Sign-extend to 64 bits, then arithmetic-shift to isolate the
       upper halfword of each operand. */
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}
1782
/* Multiply the low 16-bit halves of rs and rs2 (signed) and place
   the product in bits 16..47 of the 64-bit result, for MULLO/MACLO. */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    /* Sign-extend the low halfword of each operand. */
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}
1797
/* mulhi rs,rs2 */
/* ACC = product of the high halfwords, positioned at bits 16..47. */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}
1804
/* mullo rs,rs2 */
/* ACC = product of the low halfwords, positioned at bits 16..47. */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}
1811
/* machi rs,rs2 */
/* ACC += product of the high halfwords. */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}
1822
/* maclo rs,rs2 */
/* ACC += product of the low halfwords. */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}
1833
/* mvfachi rd */
/* rd = bits 63..32 of the accumulator. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}
1840
/* mvfacmi rd */
/* rd = bits 47..16 of the accumulator (the middle 32 bits). */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    tcg_temp_free_i64(rd64);
    return true;
}
1851
/* mvtachi rs */
/* Bits 63..32 of the accumulator = rs; low half is preserved. */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    tcg_temp_free_i64(rs64);
    return true;
}
1862
/* mvtaclo rs */
/* Bits 31..0 of the accumulator = rs; high half is preserved. */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    tcg_temp_free_i64(rs64);
    return true;
}
1873
/* racw #imm */
/* Round the accumulator word; the encoded imm is 0-based, the helper
   takes the actual shift count, hence imm + 1. */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_const_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    tcg_temp_free(imm);
    return true;
}
1882
/* sat rd */
/* Saturate rd after a signed overflow: if O indicates overflow
   (stored value < 0), replace rd with INT32_MAX or INT32_MIN
   according to the S flag; otherwise leave rd unchanged. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_const_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    tcg_temp_free(z);
    return true;
}
1899
/* satr */
/* Saturate the 64-bit accumulator registers; done in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}
1906
#define cat3(a, b, c) a##b##c
/*
 * Generate the two translator entry points for a two-operand FPU
 * instruction: the immediate form (#imm, rd — the 32-bit literal is
 * fetched with li()) and the register/memory form (rs or dsp[rs], rd).
 * The arithmetic itself lives in gen_helper_<op>, which also updates
 * FPSW.
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_const_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], imm); \
        tcg_temp_free(imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], val); \
        tcg_temp_free(mem); \
        return true; \
    }
1929
/*
 * Generate the translator entry point for a one-operand FPU
 * conversion (rs or dsp[rs] source, rd destination) that is
 * implemented by gen_helper_<op>.
 */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
        tcg_temp_free(mem); \
        return true; \
    }
1940
/* fadd/fsub/fmul/fdiv in both #imm,rd and rs/dsp[rs],rd forms. */
FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)
1945
/* fcmp #imm, rd */
/* Float compare against a literal; result goes to FPSW/PSW in the helper. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_const_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}
1954
/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
/* Float compare against a register/memory operand; flags via helper. */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}
1966
/* ftoi (truncate) and round: float-to-integer conversions. */
FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)
1969
/* itof rs, rd */
/* itof dsp[rs], rd */
/* Integer-to-float conversion; source size comes from the mi field. */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    tcg_temp_free(mem);
    return true;
}
1981
/* Set bits of a byte in memory: *mem |= mask (read-modify-write). */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}
1991
/* Clear bits of a byte in memory: *mem &= ~mask (read-modify-write). */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}
2001
/* Test bits of a byte in memory: C = ((*mem & mask) != 0), Z mirrors C. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(val);
}
2012
/* Toggle bits of a byte in memory: *mem ^= mask (read-modify-write). */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}
2022
/* Set bits in a register: reg |= mask. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}
2027
/* Clear bits in a register: reg &= ~mask. */
static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}
2032
/* Test bits in a register: C = ((reg & mask) != 0), Z mirrors C. */
static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(t0);
}
2042
/* Toggle bits in a register: reg ^= mask. */
static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
2047
/*
 * Generate the four addressing forms of a bit instruction from the
 * rx_<op>r (register) and rx_<op>m (memory byte) primitives:
 *   _im: #imm bit of a memory byte    _ir: #imm bit of a register
 *   _rr: reg-selected bit (mod 32)    _rm: reg-selected bit (mod 8)
 * of a register / of a memory byte respectively.
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_const_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(mem); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_const_i32(1 << a->imm); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, mask, b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, mask, b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mem); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    }
2099
/* bset/bclr/btst/bnot, each in all four addressing forms. */
BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
2104
2105static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
2106{
2107 TCGv bit;
2108 DisasCompare dc;
2109 dc.temp = tcg_temp_new();
2110 bit = tcg_temp_new();
2111 psw_cond(&dc, cond);
2112 tcg_gen_andi_i32(val, val, ~(1 << pos));
2113 tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
2114 tcg_gen_deposit_i32(val, val, bit, pos, 1);
2115 tcg_temp_free(bit);
2116 tcg_temp_free(dc.temp);
2117 }
2118
/* bmcnd #imm, dsp[rd] */
/* Conditionally set/clear bit #imm of a memory byte (RMW). */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    tcg_temp_free(val);
    tcg_temp_free(mem);
    return true;
}
2133
/* bmcond #imm, rd */
/* Conditionally set/clear bit #imm of register rd. */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2140
/* PSW bit numbers as encoded in the cb field of clrpsw/setpsw. */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};
2149
2150static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2151{
2152 if (cb < 8) {
2153 switch (cb) {
2154 case PSW_C:
2155 tcg_gen_movi_i32(cpu_psw_c, val);
2156 break;
2157 case PSW_Z:
2158 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2159 break;
2160 case PSW_S:
2161 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2162 break;
2163 case PSW_O:
2164 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2165 break;
2166 default:
2167 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2168 break;
2169 }
2170 } else if (is_privileged(ctx, 0)) {
2171 switch (cb) {
2172 case PSW_I:
2173 tcg_gen_movi_i32(cpu_psw_i, val);
2174 ctx->base.is_jmp = DISAS_UPDATE;
2175 break;
2176 case PSW_U:
2177 tcg_gen_movi_i32(cpu_psw_u, val);
2178 break;
2179 default:
2180 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2181 break;
2182 }
2183 }
2184}
2185
/* clrpsw psw */
/* Clear the selected PSW bit. */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}
2192
/* setpsw psw */
/* Set the selected PSW bit. */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}
2199
/* mvtipl #imm */
/* Set the interrupt priority level (privileged); ends the TB so the
   new level is observed immediately. */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
2209
/* mvtc #imm, rd */
/* Move immediate to control register; writing PSW (cr == 0) in
   privileged mode ends the TB so mode changes take effect. */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_const_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(imm);
    return true;
}
2223
/* mvtc rs, rd */
/* Move register to control register; same PSW-write handling as MVTC_i. */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
2233
/* mvfc rs, rd */
/* Move control register to rd; ctx->pc is passed for reading PC. */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
2240
/* rtfi */
/* Return from fast interrupt: restore pc/psw from BPC/BPSW
   (privileged); exit the TB since the execution mode may change. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}
2255
/* rte */
/* Return from exception: pop pc and psw from the stack (privileged);
   exit the TB since the execution mode may change. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}
2270
/* brk */
/* Software break: sync pc, then raise the exception in the helper. */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
2279
/* int #imm */
/* Software interrupt with 8-bit vector number; pc is synced first so
   the helper can push the correct return address. */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_const_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    tcg_temp_free(vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
2293
/* wait */
/* Halt until interrupt (privileged).  NOTE(review): advances cpu_pc
   by the insn length (2) rather than using ctx->base.pc_next —
   assumes cpu_pc already holds this insn's address here; verify. */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_addi_i32(cpu_pc, cpu_pc, 2);
        gen_helper_wait(cpu_env);
    }
    return true;
}
2303
/* TranslatorOps hook: stash the CPU state pointer in the context. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
}
2310
/* TranslatorOps hook: nothing to do at TB start for RX. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
2314
/* TranslatorOps hook: record the insn's pc for restore_state_to_opc. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}
2321
/* TranslatorOps hook: emit a debug trap at a guest breakpoint. */
static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* Advance by one byte so the TB is not recorded as zero-length. */
    ctx->base.pc_next += 1;
    return true;
}
2334
/* TranslatorOps hook: decode and translate a single instruction;
   undecodable opcodes raise an illegal-instruction exception. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}
2346
/* TranslatorOps hook: finalize the TB according to how translation
   ended (chain, indirect jump, state update, exit, or no return). */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc was set at runtime; single-step must trap to gdb. */
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
2375
/* TranslatorOps hook: dump the guest disassembly of this TB. */
static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
2381
/* Translator callbacks wired into the generic translator_loop. */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .breakpoint_check   = rx_tr_breakpoint_check,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};
2391
/* Entry point for TB translation: run the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
}
2398
/* Restore env->pc from the insn_start data recorded per instruction. */
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
2404
/* Create the TCG global for CPURXState field @sym, named @name. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)
2408
/* One-time init: create TCG globals for R0-R15, the PSW flag storage,
   the control registers, and the 64-bit accumulator. */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPURXState, regs[i]),
                                              regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    cpu_acc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPURXState, acc), "ACC");
}
2440}