path: root/src/emu/cpu/sh2/sh2drc.c
Diffstat (limited to 'src/emu/cpu/sh2/sh2drc.c')
-rw-r--r--  src/emu/cpu/sh2/sh2drc.c  1574
1 files changed, 787 insertions, 787 deletions
diff --git a/src/emu/cpu/sh2/sh2drc.c b/src/emu/cpu/sh2/sh2drc.c
index 4f0eff81f5d..808e1321d57 100644
--- a/src/emu/cpu/sh2/sh2drc.c
+++ b/src/emu/cpu/sh2/sh2drc.c
@@ -31,19 +31,19 @@ using namespace uml;
DEBUGGING
***************************************************************************/
-#define FORCE_C_BACKEND (0) // use the C backend even when a native one is available
-#define LOG_UML (0) // log UML assembly
-#define LOG_NATIVE (0) // log native assembly
+#define FORCE_C_BACKEND (0) // use the C backend even when a native one is available
+#define LOG_UML (0) // log UML assembly
+#define LOG_NATIVE (0) // log native assembly
-#define SET_EA (0) // makes slower but "shows work" in the EA fake register like the interpreter
+#define SET_EA (0) // makes slower but "shows work" in the EA fake register like the interpreter
-#define DISABLE_FAST_REGISTERS (0) // set to 1 to turn off usage of register caching
-#define SINGLE_INSTRUCTION_MODE (0)
+#define DISABLE_FAST_REGISTERS (0) // set to 1 to turn off usage of register caching
+#define SINGLE_INSTRUCTION_MODE (0)
-#define ADDSUBV_DIRECT (0)
+#define ADDSUBV_DIRECT (0)
#define VERBOSE 0
-#define LOG(x) do { if (VERBOSE) logerror x; } while (0)
+#define LOG(x) do { if (VERBOSE) logerror x; } while (0)
#if SET_EA
#define SETEA(x) UML_MOV(block, mem(&sh2->ea), ireg(x))
@@ -56,25 +56,25 @@ using namespace uml;
***************************************************************************/
/* map variables */
-#define MAPVAR_PC M0
-#define MAPVAR_CYCLES M1
+#define MAPVAR_PC M0
+#define MAPVAR_CYCLES M1
/* size of the execution code cache */
-#define CACHE_SIZE (32 * 1024 * 1024)
+#define CACHE_SIZE (32 * 1024 * 1024)
/* compilation boundaries -- how far back/forward does the analysis extend? */
-#define COMPILE_BACKWARDS_BYTES 64
-#define COMPILE_FORWARDS_BYTES 256
-#define COMPILE_MAX_INSTRUCTIONS ((COMPILE_BACKWARDS_BYTES/2) + (COMPILE_FORWARDS_BYTES/2))
-#define COMPILE_MAX_SEQUENCE 64
+#define COMPILE_BACKWARDS_BYTES 64
+#define COMPILE_FORWARDS_BYTES 256
+#define COMPILE_MAX_INSTRUCTIONS ((COMPILE_BACKWARDS_BYTES/2) + (COMPILE_FORWARDS_BYTES/2))
+#define COMPILE_MAX_SEQUENCE 64
/* exit codes */
-#define EXECUTE_OUT_OF_CYCLES 0
-#define EXECUTE_MISSING_CODE 1
-#define EXECUTE_UNMAPPED_CODE 2
-#define EXECUTE_RESET_CACHE 3
+#define EXECUTE_OUT_OF_CYCLES 0
+#define EXECUTE_MISSING_CODE 1
+#define EXECUTE_UNMAPPED_CODE 2
+#define EXECUTE_RESET_CACHE 3
-#define PROBE_ADDRESS ~0
+#define PROBE_ADDRESS ~0
extern int sh2_describe(void *param, opcode_desc *desc, const opcode_desc *prev);
@@ -82,7 +82,7 @@ extern int sh2_describe(void *param, opcode_desc *desc, const opcode_desc *prev)
MACROS
***************************************************************************/
-#define R32(reg) sh2->regmap[reg]
+#define R32(reg) sh2->regmap[reg]
/***************************************************************************
STRUCTURES & TYPEDEFS
@@ -91,9 +91,9 @@ extern int sh2_describe(void *param, opcode_desc *desc, const opcode_desc *prev)
/* internal compiler state */
struct compiler_state
{
- UINT32 cycles; /* accumulated cycles */
- UINT8 checkints; /* need to check interrupts before next instruction */
- code_label labelnum; /* index for local labels */
+ UINT32 cycles; /* accumulated cycles */
+ UINT8 checkints; /* need to check interrupts before next instruction */
+ code_label labelnum; /* index for local labels */
};
/***************************************************************************
@@ -142,7 +142,7 @@ INLINE sh2_state *get_safe_token(device_t *device)
{
assert(device != NULL);
assert(device->type() == SH1 ||
- device->type() == SH2);
+ device->type() == SH2);
return *(sh2_state **)downcast<legacy_cpu_device *>(device)->token();
}
@@ -950,7 +950,7 @@ static void code_compile_block(sh2_state *sh2, UINT8 mode, offs_t pc)
/* add a code log entry */
if (LOG_UML)
- block->append_comment("-------------------------"); // comment
+ block->append_comment("-------------------------"); // comment
/* determine the last instruction in this sequence */
for (seqlast = seqhead; seqlast != NULL; seqlast = seqlast->next())
@@ -960,20 +960,20 @@ static void code_compile_block(sh2_state *sh2, UINT8 mode, offs_t pc)
/* if we don't have a hash for this mode/pc, or if we are overriding all, add one */
if (override || !drcuml->hash_exists(mode, seqhead->pc))
- UML_HASH(block, mode, seqhead->pc); // hash mode,pc
+ UML_HASH(block, mode, seqhead->pc); // hash mode,pc
/* if we already have a hash, and this is the first sequence, assume that we */
/* are recompiling due to being out of sync and allow future overrides */
else if (seqhead == desclist)
{
override = TRUE;
- UML_HASH(block, mode, seqhead->pc); // hash mode,pc
+ UML_HASH(block, mode, seqhead->pc); // hash mode,pc
}
/* otherwise, redispatch to that fixed PC and skip the rest of the processing */
else
{
- UML_LABEL(block, seqhead->pc | 0x80000000); // label seqhead->pc | 0x80000000
+ UML_LABEL(block, seqhead->pc | 0x80000000); // label seqhead->pc | 0x80000000
UML_HASHJMP(block, 0, seqhead->pc, *sh2->nocode);
// hashjmp <mode>,seqhead->pc,nocode
continue;
@@ -986,7 +986,7 @@ static void code_compile_block(sh2_state *sh2, UINT8 mode, offs_t pc)
/* label this instruction, if it may be jumped to locally */
if (seqhead->flags & OPFLAG_IS_BRANCH_TARGET)
{
- UML_LABEL(block, seqhead->pc | 0x80000000); // label seqhead->pc | 0x80000000
+ UML_LABEL(block, seqhead->pc | 0x80000000); // label seqhead->pc | 0x80000000
}
/* iterate over instructions in the sequence and compile them */
@@ -1007,7 +1007,7 @@ static void code_compile_block(sh2_state *sh2, UINT8 mode, offs_t pc)
}
/* count off cycles and go there */
- generate_update_cycles(sh2, block, &compiler, nextpc, TRUE); // <subtract cycles>
+ generate_update_cycles(sh2, block, &compiler, nextpc, TRUE); // <subtract cycles>
/* SH2 has no modes */
if (seqlast->next() == NULL || seqlast->next()->pc != nextpc)
@@ -1045,66 +1045,66 @@ static void static_generate_entry_point(sh2_state *sh2)
/* forward references */
alloc_handle(drcuml, &sh2->nocode, "nocode");
- alloc_handle(drcuml, &sh2->write32, "write32"); // necessary?
+ alloc_handle(drcuml, &sh2->write32, "write32"); // necessary?
alloc_handle(drcuml, &sh2->entry, "entry");
- UML_HANDLE(block, *sh2->entry); // handle entry
+ UML_HANDLE(block, *sh2->entry); // handle entry
/* load fast integer registers */
load_fast_iregs(sh2, block);
/* check for interrupts */
- UML_MOV(block, mem(&sh2->irqline), 0xffffffff); // mov irqline, #-1
- UML_CMP(block, mem(&sh2->pending_nmi), 0); // cmp pending_nmi, #0
- UML_JMPc(block, COND_Z, skip+2); // jz skip+2
+ UML_MOV(block, mem(&sh2->irqline), 0xffffffff); // mov irqline, #-1
+ UML_CMP(block, mem(&sh2->pending_nmi), 0); // cmp pending_nmi, #0
+ UML_JMPc(block, COND_Z, skip+2); // jz skip+2
- UML_MOV(block, mem(&sh2->pending_nmi), 0); // zap pending_nmi
- UML_JMP(block, skip+1); // and then go take it (evec is already set)
+ UML_MOV(block, mem(&sh2->pending_nmi), 0); // zap pending_nmi
+ UML_JMP(block, skip+1); // and then go take it (evec is already set)
- UML_LABEL(block, skip+2); // skip+2:
- UML_MOV(block, mem(&sh2->evec), 0xffffffff); // mov evec, -1
- UML_MOV(block, I0, 0xffffffff); // mov r0, -1 (r0 = irq)
- UML_AND(block, I1, I0, 0xffff); // and r1, 0xffff
+ UML_LABEL(block, skip+2); // skip+2:
+ UML_MOV(block, mem(&sh2->evec), 0xffffffff); // mov evec, -1
+ UML_MOV(block, I0, 0xffffffff); // mov r0, -1 (r0 = irq)
+ UML_AND(block, I1, I0, 0xffff); // and r1, 0xffff
- UML_LZCNT(block, I1, mem(&sh2->pending_irq)); // lzcnt r1, r1
- UML_CMP(block, I1, 32); // cmp r1, #32
- UML_JMPc(block, COND_Z, skip+4); // jz skip+4
+ UML_LZCNT(block, I1, mem(&sh2->pending_irq)); // lzcnt r1, r1
+ UML_CMP(block, I1, 32); // cmp r1, #32
+ UML_JMPc(block, COND_Z, skip+4); // jz skip+4
- UML_SUB(block, mem(&sh2->irqline), 31, I1); // sub irqline, #31, r1
+ UML_SUB(block, mem(&sh2->irqline), 31, I1); // sub irqline, #31, r1
- UML_LABEL(block, skip+4); // skip+4:
- UML_CMP(block, mem(&sh2->internal_irq_level), 0xffffffff); // cmp internal_irq_level, #-1
- UML_JMPc(block, COND_Z, skip+3); // jz skip+3
- UML_CMP(block, mem(&sh2->internal_irq_level), mem(&sh2->irqline)); // cmp internal_irq_level, irqline
- UML_JMPc(block, COND_LE, skip+3); // jle skip+3
+ UML_LABEL(block, skip+4); // skip+4:
+ UML_CMP(block, mem(&sh2->internal_irq_level), 0xffffffff); // cmp internal_irq_level, #-1
+ UML_JMPc(block, COND_Z, skip+3); // jz skip+3
+ UML_CMP(block, mem(&sh2->internal_irq_level), mem(&sh2->irqline)); // cmp internal_irq_level, irqline
+ UML_JMPc(block, COND_LE, skip+3); // jle skip+3
- UML_MOV(block, mem(&sh2->irqline), mem(&sh2->internal_irq_level)); // mov r0, internal_irq_level
+ UML_MOV(block, mem(&sh2->irqline), mem(&sh2->internal_irq_level)); // mov r0, internal_irq_level
- UML_LABEL(block, skip+3); // skip+3:
- UML_CMP(block, mem(&sh2->irqline), 0xffffffff); // cmp irqline, #-1
- UML_JMPc(block, COND_Z, skip+1); // jz skip+1
- UML_CALLC(block, cfunc_fastirq, sh2); // callc fastirq
+ UML_LABEL(block, skip+3); // skip+3:
+ UML_CMP(block, mem(&sh2->irqline), 0xffffffff); // cmp irqline, #-1
+ UML_JMPc(block, COND_Z, skip+1); // jz skip+1
+ UML_CALLC(block, cfunc_fastirq, sh2); // callc fastirq
- UML_LABEL(block, skip+1); // skip+1:
+ UML_LABEL(block, skip+1); // skip+1:
- UML_CMP(block, mem(&sh2->evec), 0xffffffff); // cmp evec, 0xffffffff
- UML_JMPc(block, COND_Z, skip); // jz skip
+ UML_CMP(block, mem(&sh2->evec), 0xffffffff); // cmp evec, 0xffffffff
+ UML_JMPc(block, COND_Z, skip); // jz skip
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, mem(&sh2->pc)); // mov r1, pc
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, mem(&sh2->pc)); // mov r1, pc
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_MOV(block, mem(&sh2->pc), mem(&sh2->evec)); // mov pc, evec
+ UML_MOV(block, mem(&sh2->pc), mem(&sh2->evec)); // mov pc, evec
- UML_LABEL(block, skip); // skip:
+ UML_LABEL(block, skip); // skip:
/* generate a hash jump via the current mode and PC */
- UML_HASHJMP(block, 0, mem(&sh2->pc), *sh2->nocode); // hashjmp <mode>,<pc>,nocode
+ UML_HASHJMP(block, 0, mem(&sh2->pc), *sh2->nocode); // hashjmp <mode>,<pc>,nocode
block->end();
}
@@ -1124,11 +1124,11 @@ static void static_generate_nocode_handler(sh2_state *sh2)
/* generate a hash jump via the current mode and PC */
alloc_handle(drcuml, &sh2->nocode, "nocode");
- UML_HANDLE(block, *sh2->nocode); // handle nocode
- UML_GETEXP(block, I0); // getexp i0
- UML_MOV(block, mem(&sh2->pc), I0); // mov [pc],i0
+ UML_HANDLE(block, *sh2->nocode); // handle nocode
+ UML_GETEXP(block, I0); // getexp i0
+ UML_MOV(block, mem(&sh2->pc), I0); // mov [pc],i0
save_fast_iregs(sh2, block);
- UML_EXIT(block, EXECUTE_MISSING_CODE); // exit EXECUTE_MISSING_CODE
+ UML_EXIT(block, EXECUTE_MISSING_CODE); // exit EXECUTE_MISSING_CODE
block->end();
}
@@ -1149,11 +1149,11 @@ static void static_generate_out_of_cycles(sh2_state *sh2)
/* generate a hash jump via the current mode and PC */
alloc_handle(drcuml, &sh2->out_of_cycles, "out_of_cycles");
- UML_HANDLE(block, *sh2->out_of_cycles); // handle out_of_cycles
- UML_GETEXP(block, I0); // getexp i0
- UML_MOV(block, mem(&sh2->pc), I0); // mov <pc>,i0
+ UML_HANDLE(block, *sh2->out_of_cycles); // handle out_of_cycles
+ UML_GETEXP(block, I0); // getexp i0
+ UML_MOV(block, mem(&sh2->pc), I0); // mov <pc>,i0
save_fast_iregs(sh2,block);
- UML_EXIT(block, EXECUTE_OUT_OF_CYCLES); // exit EXECUTE_OUT_OF_CYCLES
+ UML_EXIT(block, EXECUTE_OUT_OF_CYCLES); // exit EXECUTE_OUT_OF_CYCLES
block->end();
}
@@ -1176,34 +1176,34 @@ static void static_generate_memory_accessor(sh2_state *sh2, int size, int iswrit
/* add a global entry for this */
alloc_handle(drcuml, handleptr, name);
- UML_HANDLE(block, **handleptr); // handle *handleptr
+ UML_HANDLE(block, **handleptr); // handle *handleptr
// with internal handlers this becomes easier.
// if addr < 0x40000000 AND it with AM and do the read/write, else just do the read/write
- UML_TEST(block, I0, 0x80000000); // test r0, #0x80000000
- UML_JMPc(block, COND_NZ, label); // if high bit is set, don't mask
+ UML_TEST(block, I0, 0x80000000); // test r0, #0x80000000
+ UML_JMPc(block, COND_NZ, label); // if high bit is set, don't mask
- UML_CMP(block, I0, 0x40000000); // cmp #0x40000000, r0
- UML_JMPc(block, COND_AE, label); // bae label
+ UML_CMP(block, I0, 0x40000000); // cmp #0x40000000, r0
+ UML_JMPc(block, COND_AE, label); // bae label
- UML_AND(block, I0, I0, AM); // and r0, r0, #AM (0xc7ffffff)
+ UML_AND(block, I0, I0, AM); // and r0, r0, #AM (0xc7ffffff)
- UML_LABEL(block, label++); // label:
+ UML_LABEL(block, label++); // label:
if (iswrite)
{
switch (size)
{
case 1:
- UML_WRITE(block, I0, I1, SIZE_BYTE, SPACE_PROGRAM); // write r0, r1, program_byte
+ UML_WRITE(block, I0, I1, SIZE_BYTE, SPACE_PROGRAM); // write r0, r1, program_byte
break;
case 2:
- UML_WRITE(block, I0, I1, SIZE_WORD, SPACE_PROGRAM); // write r0, r1, program_word
+ UML_WRITE(block, I0, I1, SIZE_WORD, SPACE_PROGRAM); // write r0, r1, program_word
break;
case 4:
- UML_WRITE(block, I0, I1, SIZE_DWORD, SPACE_PROGRAM); // write r0, r1, program_dword
+ UML_WRITE(block, I0, I1, SIZE_DWORD, SPACE_PROGRAM); // write r0, r1, program_dword
break;
}
}
@@ -1212,20 +1212,20 @@ static void static_generate_memory_accessor(sh2_state *sh2, int size, int iswrit
switch (size)
{
case 1:
- UML_READ(block, I0, I0, SIZE_BYTE, SPACE_PROGRAM); // read r0, program_byte
+ UML_READ(block, I0, I0, SIZE_BYTE, SPACE_PROGRAM); // read r0, program_byte
break;
case 2:
- UML_READ(block, I0, I0, SIZE_WORD, SPACE_PROGRAM); // read r0, program_word
+ UML_READ(block, I0, I0, SIZE_WORD, SPACE_PROGRAM); // read r0, program_word
break;
case 4:
- UML_READ(block, I0, I0, SIZE_DWORD, SPACE_PROGRAM); // read r0, program_dword
+ UML_READ(block, I0, I0, SIZE_DWORD, SPACE_PROGRAM); // read r0, program_dword
break;
}
}
- UML_RET(block); // ret
+ UML_RET(block); // ret
block->end();
}
@@ -1407,7 +1407,7 @@ static void log_add_disasm_comment(drcuml_block *block, UINT32 pc, UINT32 op)
#if (LOG_UML)
char buffer[100];
DasmSH2(buffer, pc, op);
- block->append_comment("%08X: %s", pc, buffer); // comment
+ block->append_comment("%08X: %s", pc, buffer); // comment
#endif
}
@@ -1427,61 +1427,61 @@ static void generate_update_cycles(sh2_state *sh2, drcuml_block *block, compiler
compiler->labelnum += 4;
/* check for interrupts */
- UML_MOV(block, mem(&sh2->irqline), 0xffffffff); // mov irqline, #-1
- UML_CMP(block, mem(&sh2->pending_nmi), 0); // cmp pending_nmi, #0
- UML_JMPc(block, COND_Z, skip+2); // jz skip+2
+ UML_MOV(block, mem(&sh2->irqline), 0xffffffff); // mov irqline, #-1
+ UML_CMP(block, mem(&sh2->pending_nmi), 0); // cmp pending_nmi, #0
+ UML_JMPc(block, COND_Z, skip+2); // jz skip+2
- UML_MOV(block, mem(&sh2->pending_nmi), 0); // zap pending_nmi
- UML_JMP(block, skip+1); // and then go take it (evec is already set)
+ UML_MOV(block, mem(&sh2->pending_nmi), 0); // zap pending_nmi
+ UML_JMP(block, skip+1); // and then go take it (evec is already set)
- UML_LABEL(block, skip+2); // skip+2:
- UML_MOV(block, mem(&sh2->evec), 0xffffffff); // mov evec, -1
- UML_MOV(block, I0, 0xffffffff); // mov r0, -1 (r0 = irq)
- UML_AND(block, I1, I0, 0xffff); // and r1, r0, 0xffff
+ UML_LABEL(block, skip+2); // skip+2:
+ UML_MOV(block, mem(&sh2->evec), 0xffffffff); // mov evec, -1
+ UML_MOV(block, I0, 0xffffffff); // mov r0, -1 (r0 = irq)
+ UML_AND(block, I1, I0, 0xffff); // and r1, r0, 0xffff
- UML_LZCNT(block, I1, mem(&sh2->pending_irq)); // lzcnt r1, pending_irq
- UML_CMP(block, I1, 32); // cmp r1, #32
- UML_JMPc(block, COND_Z, skip+4); // jz skip+4
+ UML_LZCNT(block, I1, mem(&sh2->pending_irq)); // lzcnt r1, pending_irq
+ UML_CMP(block, I1, 32); // cmp r1, #32
+ UML_JMPc(block, COND_Z, skip+4); // jz skip+4
- UML_SUB(block, mem(&sh2->irqline), 31, I1); // sub irqline, #31, r1
+ UML_SUB(block, mem(&sh2->irqline), 31, I1); // sub irqline, #31, r1
- UML_LABEL(block, skip+4); // skip+4:
- UML_CMP(block, mem(&sh2->internal_irq_level), 0xffffffff); // cmp internal_irq_level, #-1
- UML_JMPc(block, COND_Z, skip+3); // jz skip+3
- UML_CMP(block, mem(&sh2->internal_irq_level), mem(&sh2->irqline)); // cmp internal_irq_level, irqline
- UML_JMPc(block, COND_LE, skip+3); // jle skip+3
+ UML_LABEL(block, skip+4); // skip+4:
+ UML_CMP(block, mem(&sh2->internal_irq_level), 0xffffffff); // cmp internal_irq_level, #-1
+ UML_JMPc(block, COND_Z, skip+3); // jz skip+3
+ UML_CMP(block, mem(&sh2->internal_irq_level), mem(&sh2->irqline)); // cmp internal_irq_level, irqline
+ UML_JMPc(block, COND_LE, skip+3); // jle skip+3
- UML_MOV(block, mem(&sh2->irqline), mem(&sh2->internal_irq_level)); // mov r0, internal_irq_level
+ UML_MOV(block, mem(&sh2->irqline), mem(&sh2->internal_irq_level)); // mov r0, internal_irq_level
- UML_LABEL(block, skip+3); // skip+3:
- UML_CMP(block, mem(&sh2->irqline), 0xffffffff); // cmp irqline, #-1
- UML_JMPc(block, COND_Z, skip+1); // jz skip+1
- UML_CALLC(block, cfunc_fastirq, sh2); // callc fastirq
+ UML_LABEL(block, skip+3); // skip+3:
+ UML_CMP(block, mem(&sh2->irqline), 0xffffffff); // cmp irqline, #-1
+ UML_JMPc(block, COND_Z, skip+1); // jz skip+1
+ UML_CALLC(block, cfunc_fastirq, sh2); // callc fastirq
- UML_LABEL(block, skip+1); // skip+1:
- UML_CMP(block, mem(&sh2->evec), 0xffffffff); // cmp evec, 0xffffffff
- UML_JMPc(block, COND_Z, skip); // jz skip
+ UML_LABEL(block, skip+1); // skip+1:
+ UML_CMP(block, mem(&sh2->evec), 0xffffffff); // cmp evec, 0xffffffff
+ UML_JMPc(block, COND_Z, skip); // jz skip
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, param); // mov r1, nextpc
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, param); // mov r1, nextpc
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_HASHJMP(block, 0, mem(&sh2->evec), *sh2->nocode); // hashjmp sh2->evec
+ UML_HASHJMP(block, 0, mem(&sh2->evec), *sh2->nocode); // hashjmp sh2->evec
- UML_LABEL(block, skip); // skip:
+ UML_LABEL(block, skip); // skip:
}
/* account for cycles */
if (compiler->cycles > 0)
{
- UML_SUB(block, mem(&sh2->icount), mem(&sh2->icount), MAPVAR_CYCLES); // sub icount,icount,cycles
- UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0
+ UML_SUB(block, mem(&sh2->icount), mem(&sh2->icount), MAPVAR_CYCLES); // sub icount,icount,cycles
+ UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0
if (allow_exception)
UML_EXHc(block, COND_S, *sh2->out_of_cycles, param);
// exh out_of_cycles,nextpc
@@ -1498,7 +1498,7 @@ static void generate_checksum_block(sh2_state *sh2, drcuml_block *block, compile
{
const opcode_desc *curdesc;
if (LOG_UML)
- block->append_comment("[Validation for %08X]", seqhead->pc); // comment
+ block->append_comment("[Validation for %08X]", seqhead->pc); // comment
/* loose verify or single instruction: just compare and fail */
if (!(sh2->drcoptions & SH2DRC_STRICT_VERIFY) || seqhead->next() == NULL)
@@ -1506,9 +1506,9 @@ static void generate_checksum_block(sh2_state *sh2, drcuml_block *block, compile
if (!(seqhead->flags & OPFLAG_VIRTUAL_NOOP))
{
void *base = sh2->direct->read_decrypted_ptr(seqhead->physpc, SH2_CODE_XOR(0));
- UML_LOAD(block, I0, base, 0, SIZE_WORD, SCALE_x2); // load i0,base,word
- UML_CMP(block, I0, seqhead->opptr.w[0]); // cmp i0,*opptr
- UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
+ UML_LOAD(block, I0, base, 0, SIZE_WORD, SCALE_x2); // load i0,base,word
+ UML_CMP(block, I0, seqhead->opptr.w[0]); // cmp i0,*opptr
+ UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
}
}
@@ -1520,25 +1520,25 @@ static void generate_checksum_block(sh2_state *sh2, drcuml_block *block, compile
if (!(curdesc->flags & OPFLAG_VIRTUAL_NOOP))
{
base = sh2->direct->read_decrypted_ptr(curdesc->physpc, SH2_CODE_XOR(0));
- UML_LOAD(block, I0, curdesc->opptr.w, 0, SIZE_WORD, SCALE_x2); // load i0,*opptr,0,word
- UML_CMP(block, I0, curdesc->opptr.w[0]); // cmp i0,*opptr
- UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
+ UML_LOAD(block, I0, curdesc->opptr.w, 0, SIZE_WORD, SCALE_x2); // load i0,*opptr,0,word
+ UML_CMP(block, I0, curdesc->opptr.w[0]); // cmp i0,*opptr
+ UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
}
#else
UINT32 sum = 0;
void *base = sh2->direct->read_decrypted_ptr(seqhead->physpc, SH2_CODE_XOR(0));
- UML_LOAD(block, I0, base, 0, SIZE_WORD, SCALE_x4); // load i0,base,word
+ UML_LOAD(block, I0, base, 0, SIZE_WORD, SCALE_x4); // load i0,base,word
sum += seqhead->opptr.w[0];
for (curdesc = seqhead->next(); curdesc != seqlast->next(); curdesc = curdesc->next())
if (!(curdesc->flags & OPFLAG_VIRTUAL_NOOP))
{
base = sh2->direct->read_decrypted_ptr(curdesc->physpc, SH2_CODE_XOR(0));
- UML_LOAD(block, I1, base, 0, SIZE_WORD, SCALE_x2); // load i1,*opptr,word
- UML_ADD(block, I0, I0, I1); // add i0,i0,i1
+ UML_LOAD(block, I1, base, 0, SIZE_WORD, SCALE_x2); // load i1,*opptr,word
+ UML_ADD(block, I0, I0, I1); // add i0,i0,i1
sum += curdesc->opptr.w[0];
}
- UML_CMP(block, I0, sum); // cmp i0,sum
- UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
+ UML_CMP(block, I0, sum); // cmp i0,sum
+ UML_EXHc(block, COND_NE, *sh2->nocode, epc(seqhead)); // exne nocode,seqhead->pc
#endif
}
}
@@ -1559,35 +1559,35 @@ static void generate_sequence_instruction(sh2_state *sh2, drcuml_block *block, c
/* set the PC map variable */
expc = (desc->flags & OPFLAG_IN_DELAY_SLOT) ? desc->pc - 1 : desc->pc;
- UML_MAPVAR(block, MAPVAR_PC, expc); // mapvar PC,expc
+ UML_MAPVAR(block, MAPVAR_PC, expc); // mapvar PC,expc
/* accumulate total cycles */
compiler->cycles += desc->cycles;
/* update the icount map variable */
- UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles
+ UML_MAPVAR(block, MAPVAR_CYCLES, compiler->cycles); // mapvar CYCLES,compiler->cycles
/* if we want a probe, add it here */
if (desc->pc == PROBE_ADDRESS)
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
- UML_CALLC(block, cfunc_printf_probe, sh2); // callc cfunc_printf_probe,sh2
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
+ UML_CALLC(block, cfunc_printf_probe, sh2); // callc cfunc_printf_probe,sh2
}
/* if we are debugging, call the debugger */
if ((sh2->device->machine().debug_flags & DEBUG_FLAG_ENABLED) != 0)
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
save_fast_iregs(sh2, block);
- UML_DEBUG(block, desc->pc); // debug desc->pc
+ UML_DEBUG(block, desc->pc); // debug desc->pc
}
- else // not debug, see what other reasons there are for flushing the PC
+ else // not debug, see what other reasons there are for flushing the PC
{
- if (sh2->drcoptions & SH2DRC_FLUSH_PC) // always flush?
+ if (sh2->drcoptions & SH2DRC_FLUSH_PC) // always flush?
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov sh2->pc, desc->pc
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov sh2->pc, desc->pc
}
- else // check for driver-selected flushes
+ else // check for driver-selected flushes
{
int pcflush;
@@ -1595,7 +1595,7 @@ static void generate_sequence_instruction(sh2_state *sh2, drcuml_block *block, c
{
if (desc->pc == sh2->pcflushes[pcflush])
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov sh2->pc, desc->pc
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov sh2->pc, desc->pc
}
}
}
@@ -1605,9 +1605,9 @@ static void generate_sequence_instruction(sh2_state *sh2, drcuml_block *block, c
/* if we hit an unmapped address, fatal error */
if (desc->flags & OPFLAG_COMPILER_UNMAPPED)
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
save_fast_iregs(sh2, block);
- UML_EXIT(block, EXECUTE_UNMAPPED_CODE); // exit EXECUTE_UNMAPPED_CODE
+ UML_EXIT(block, EXECUTE_UNMAPPED_CODE); // exit EXECUTE_UNMAPPED_CODE
}
/* if this is an invalid opcode, die */
@@ -1622,9 +1622,9 @@ static void generate_sequence_instruction(sh2_state *sh2, drcuml_block *block, c
/* compile the instruction */
if (!generate_opcode(sh2, block, compiler, desc, ovrpc))
{
- UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
- UML_MOV(block, mem(&sh2->arg0), desc->opptr.w[0]); // mov [arg0],opcode
- UML_CALLC(block, cfunc_unimplemented, sh2); // callc cfunc_unimplemented
+ UML_MOV(block, mem(&sh2->pc), desc->pc); // mov [pc],desc->pc
+ UML_MOV(block, mem(&sh2->arg0), desc->opptr.w[0]); // mov [arg0],opcode
+ UML_CALLC(block, cfunc_unimplemented, sh2); // callc cfunc_unimplemented
}
}
}
@@ -1639,7 +1639,7 @@ static void generate_delay_slot(sh2_state *sh2, drcuml_block *block, compiler_st
/* compile the delay slot using temporary compiler state */
assert(desc->delay.first() != NULL);
- generate_sequence_instruction(sh2, block, &compiler_temp, desc->delay.first(), ovrpc); // <next instruction>
+ generate_sequence_instruction(sh2, block, &compiler_temp, desc->delay.first(), ovrpc); // <next instruction>
/* update the label */
compiler->labelnum = compiler_temp.labelnum;
@@ -1663,11 +1663,11 @@ static int generate_opcode(sh2_state *sh2, drcuml_block *block, compiler_state *
case 0:
return generate_group_0(sh2, block, compiler, desc, opcode, in_delay_slot, ovrpc);
- case 1: // MOVLS4
+ case 1: // MOVLS4
scratch = (opcode & 0x0f) * 4;
- UML_ADD(block, I0, R32(Rn), scratch); // add r0, Rn, scratch
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
- SETEA(0); // set ea for debug
+ UML_ADD(block, I0, R32(Rn), scratch); // add r0, Rn, scratch
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ SETEA(0); // set ea for debug
UML_CALLH(block, *sh2->write32);
if (!in_delay_slot)
@@ -1681,12 +1681,12 @@ static int generate_opcode(sh2_state *sh2, drcuml_block *block, compiler_state *
case 4:
return generate_group_4(sh2, block, compiler, desc, opcode, in_delay_slot, ovrpc);
- case 5: // MOVLL4
+ case 5: // MOVLL4
scratch = (opcode & 0x0f) * 4;
- UML_ADD(block, I0, R32(Rm), scratch); // add r0, Rm, scratch
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, R32(Rn), I0); // mov Rn, r0
+ UML_ADD(block, I0, R32(Rm), scratch); // add r0, Rm, scratch
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, R32(Rn), I0); // mov Rn, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1695,16 +1695,16 @@ static int generate_opcode(sh2_state *sh2, drcuml_block *block, compiler_state *
case 6:
return generate_group_6(sh2, block, compiler, desc, opcode, in_delay_slot, ovrpc);
- case 7: // ADDI
+ case 7: // ADDI
scratch = opcode & 0xff;
scratch2 = (UINT32)(INT32)(INT16)(INT8)scratch;
- UML_ADD(block, R32(Rn), R32(Rn), scratch2); // add Rn, Rn, scratch2
+ UML_ADD(block, R32(Rn), R32(Rn), scratch2); // add Rn, Rn, scratch2
return TRUE;
case 8:
return generate_group_8(sh2, block, compiler, desc, opcode, in_delay_slot, ovrpc);
- case 9: // MOVWI
+ case 9: // MOVWI
if (ovrpc == 0xffffffff)
{
scratch = (desc->pc + 2) + ((opcode & 0xff) * 2) + 2;
@@ -1716,49 +1716,49 @@ static int generate_opcode(sh2_state *sh2, drcuml_block *block, compiler_state *
if (sh2->drcoptions & SH2DRC_STRICT_PCREL)
{
- UML_MOV(block, I0, scratch); // mov r0, scratch
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->read16); // read16(r0, r1)
- UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
+ UML_MOV(block, I0, scratch); // mov r0, scratch
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->read16); // read16(r0, r1)
+ UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
}
else
{
scratch2 = (UINT32)(INT32)(INT16) RW(sh2, scratch);
- UML_MOV(block, R32(Rn), scratch2); // mov Rn, scratch2
+ UML_MOV(block, R32(Rn), scratch2); // mov Rn, scratch2
}
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
- case 10: // BRA
+ case 10: // BRA
disp = ((INT32)opcode << 20) >> 20;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = pc+4 + disp*2 + 2
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = pc+4 + disp*2 + 2
generate_delay_slot(sh2, block, compiler, desc, sh2->ea-2);
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // hashjmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // hashjmp sh2->ea
return TRUE;
- case 11: // BSR
+ case 11: // BSR
// panicstr @ 403da22 relies on the delay slot clobbering the PR set by a BSR, so
// do this before running the delay slot
- UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
+ UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
disp = ((INT32)opcode << 20) >> 20;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = pc+4 + disp*2 + 2
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = pc+4 + disp*2 + 2
generate_delay_slot(sh2, block, compiler, desc, sh2->ea-2);
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // hashjmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // hashjmp sh2->ea
return TRUE;
case 12:
return generate_group_12(sh2, block, compiler, desc, opcode, in_delay_slot, ovrpc);
- case 13: // MOVLI
+ case 13: // MOVLI
if (ovrpc == 0xffffffff)
{
scratch = ((desc->pc + 4) & ~3) + ((opcode & 0xff) * 4);
@@ -1770,27 +1770,27 @@ static int generate_opcode(sh2_state *sh2, drcuml_block *block, compiler_state *
if (sh2->drcoptions & SH2DRC_STRICT_PCREL)
{
- UML_MOV(block, I0, scratch); // mov r0, scratch
- UML_CALLH(block, *sh2->read32); // read32(r0, r1)
- UML_MOV(block, R32(Rn), I0); // mov Rn, r0
+ UML_MOV(block, I0, scratch); // mov r0, scratch
+ UML_CALLH(block, *sh2->read32); // read32(r0, r1)
+ UML_MOV(block, R32(Rn), I0); // mov Rn, r0
}
else
{
scratch2 = RL(sh2, scratch);
- UML_MOV(block, R32(Rn), scratch2); // mov Rn, scratch2
+ UML_MOV(block, R32(Rn), scratch2); // mov Rn, scratch2
}
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
- case 14: // MOVI
+ case 14: // MOVI
scratch = opcode & 0xff;
scratch2 = (UINT32)(INT32)(INT16)(INT8)scratch;
UML_MOV(block, R32(Rn), scratch2);
return TRUE;
- case 15: // NOP
+ case 15: // NOP
return TRUE;
}
@@ -1826,17 +1826,17 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x03: // BSRF(Rn);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_ADD(block, mem(&sh2->target), R32(Rn), 4); // add target, Rm, #4
- UML_ADD(block, mem(&sh2->target), mem(&sh2->target), desc->pc); // add target, target, pc
+ UML_ADD(block, mem(&sh2->target), R32(Rn), 4); // add target, Rm, #4
+ UML_ADD(block, mem(&sh2->target), mem(&sh2->target), desc->pc); // add target, target, pc
// 32x Cosmic Carnage @ 6002cb0 relies on the delay slot
// clobbering the calculated PR, so do it first
- UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
+ UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
generate_delay_slot(sh2, block, compiler, desc, sh2->target);
- generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp target
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp target
return TRUE;
}
break;
@@ -1845,9 +1845,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x14: // MOVBS0(Rm, Rn);
case 0x24: // MOVBS0(Rm, Rn);
case 0x34: // MOVBS0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
- UML_AND(block, I1, R32(Rm), 0x000000ff); // and r1, Rm, 0xff
- UML_CALLH(block, *sh2->write8); // call write8
+ UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
+ UML_AND(block, I1, R32(Rm), 0x000000ff); // and r1, Rm, 0xff
+ UML_CALLH(block, *sh2->write8); // call write8
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1857,9 +1857,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x15: // MOVWS0(Rm, Rn);
case 0x25: // MOVWS0(Rm, Rn);
case 0x35: // MOVWS0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
- UML_AND(block, I1, R32(Rm), 0x0000ffff); // and r1, Rm, 0xffff
- UML_CALLH(block, *sh2->write16); // call write16
+ UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
+ UML_AND(block, I1, R32(Rm), 0x0000ffff); // and r1, Rm, 0xffff
+ UML_CALLH(block, *sh2->write16); // call write16
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1869,9 +1869,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x16: // MOVLS0(Rm, Rn);
case 0x26: // MOVLS0(Rm, Rn);
case 0x36: // MOVLS0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_ADD(block, I0, R32(0), R32(Rn)); // add r0, R0, Rn
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1883,25 +1883,25 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x37: // MULL(Rm, Rn);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_MULU(block, mem(&sh2->macl), mem(&sh2->ea), R32(Rn), R32(Rm)); // mulu macl, ea, Rn, Rm
+ UML_MULU(block, mem(&sh2->macl), mem(&sh2->ea), R32(Rn), R32(Rm)); // mulu macl, ea, Rn, Rm
return TRUE;
}
break;
case 0x08: // CLRT();
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
return TRUE;
case 0x0a: // STSMACH(Rn);
- UML_MOV(block, R32(Rn), mem(&sh2->mach)); // mov Rn, mach
+ UML_MOV(block, R32(Rn), mem(&sh2->mach)); // mov Rn, mach
return TRUE;
case 0x0b: // RTS();
- UML_MOV(block, mem(&sh2->target), mem(&sh2->pr)); // mov target, pr (in case of d-slot shenanigans)
+ UML_MOV(block, mem(&sh2->target), mem(&sh2->pr)); // mov target, pr (in case of d-slot shenanigans)
generate_delay_slot(sh2, block, compiler, desc, sh2->target);
- generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode);
return TRUE;
@@ -1909,9 +1909,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x1c: // MOVBL0(Rm, Rn);
case 0x2c: // MOVBL0(Rm, Rn);
case 0x3c: // MOVBL0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
- UML_CALLH(block, *sh2->read8); // call read8
- UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
+ UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
+ UML_CALLH(block, *sh2->read8); // call read8
+ UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1921,9 +1921,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x1d: // MOVWL0(Rm, Rn);
case 0x2d: // MOVWL0(Rm, Rn);
case 0x3d: // MOVWL0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
- UML_CALLH(block, *sh2->read16); // call read16
- UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
+ UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
+ UML_CALLH(block, *sh2->read16); // call read16
+ UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1933,9 +1933,9 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x1e: // MOVLL0(Rm, Rn);
case 0x2e: // MOVLL0(Rm, Rn);
case 0x3e: // MOVLL0(Rm, Rn);
- UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, R32(Rn), I0); // mov Rn, r0
+ UML_ADD(block, I0, R32(0), R32(Rm)); // add r0, R0, Rm
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, R32(Rn), I0); // mov Rn, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -1956,19 +1956,19 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
break;
case 0x12: // STCGBR(Rn);
- UML_MOV(block, R32(Rn), mem(&sh2->gbr)); // mov Rn, gbr
+ UML_MOV(block, R32(Rn), mem(&sh2->gbr)); // mov Rn, gbr
return TRUE;
case 0x18: // SETT();
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
return TRUE;
case 0x19: // DIV0U();
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~(M|Q|T)); // and sr, sr, ~(M|Q|T)
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~(M|Q|T)); // and sr, sr, ~(M|Q|T)
return TRUE;
case 0x1a: // STSMACL(Rn);
- UML_MOV(block, R32(Rn), mem(&sh2->macl)); // mov Rn, macl
+ UML_MOV(block, R32(Rn), mem(&sh2->macl)); // mov Rn, macl
return TRUE;
case 0x1b: // SLEEP();
@@ -1976,30 +1976,30 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
// if an interrupt is taken, the return address is set to the next instruction
UML_CALLC(block, cfunc_checkirqs, sh2);
- UML_MOV(block, I0, mem(&sh2->evec)); // mov r0, evec
- UML_CMP(block, I0, 0xffffffff); // cmp r0, 0xffffffff
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz skip
+ UML_MOV(block, I0, mem(&sh2->evec)); // mov r0, evec
+ UML_CMP(block, I0, 0xffffffff); // cmp r0, 0xffffffff
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz skip
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, mem(&sh2->irqsr)); // mov r1, irqsr
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, desc->pc+2); // mov r1, nextpc
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, desc->pc+2); // mov r1, nextpc
+ UML_CALLH(block, *sh2->write32); // call write32
- UML_HASHJMP(block, 0, mem(&sh2->evec), *sh2->nocode); // hashjmp sh2->evec
+ UML_HASHJMP(block, 0, mem(&sh2->evec), *sh2->nocode); // hashjmp sh2->evec
- UML_LABEL(block, compiler->labelnum++); // skip:
+ UML_LABEL(block, compiler->labelnum++); // skip:
// now go "out of cycles"
if (compiler->cycles > 0)
{
- UML_MOV(block, mem(&sh2->icount), 0); // mov icount, #0
- UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0
- UML_EXH(block, *sh2->out_of_cycles, desc->pc); // go out of cycles
+ UML_MOV(block, mem(&sh2->icount), 0); // mov icount, #0
+ UML_MAPVAR(block, MAPVAR_CYCLES, 0); // mapvar cycles,0
+ UML_EXH(block, *sh2->out_of_cycles, desc->pc); // go out of cycles
}
else
{
@@ -2009,52 +2009,52 @@ static int generate_group_0(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 0x22: // STCVBR(Rn);
- UML_MOV(block, R32(Rn), mem(&sh2->vbr)); // mov Rn, vbr
+ UML_MOV(block, R32(Rn), mem(&sh2->vbr)); // mov Rn, vbr
return TRUE;
case 0x23: // BRAF(Rn);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_ADD(block, mem(&sh2->target), R32(Rn), desc->pc+4); // add target, Rn, pc+4
+ UML_ADD(block, mem(&sh2->target), R32(Rn), desc->pc+4); // add target, Rn, pc+4
generate_delay_slot(sh2, block, compiler, desc, sh2->target);
- generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp target
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp target
return TRUE;
}
break;
case 0x28: // CLRMAC();
- UML_MOV(block, mem(&sh2->macl), 0); // mov macl, #0
- UML_MOV(block, mem(&sh2->mach), 0); // mov mach, #0
+ UML_MOV(block, mem(&sh2->macl), 0); // mov macl, #0
+ UML_MOV(block, mem(&sh2->mach), 0); // mov mach, #0
return TRUE;
case 0x29: // MOVT(Rn);
- UML_AND(block, R32(Rn), mem(&sh2->sr), T); // and Rn, sr, T
+ UML_AND(block, R32(Rn), mem(&sh2->sr), T); // and Rn, sr, T
return TRUE;
case 0x2a: // STSPR(Rn);
- UML_MOV(block, R32(Rn), mem(&sh2->pr)); // mov Rn, pr
+ UML_MOV(block, R32(Rn), mem(&sh2->pr)); // mov Rn, pr
return TRUE;
case 0x2b: // RTE();
generate_delay_slot(sh2, block, compiler, desc, 0xffffffff);
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, mem(&sh2->pc), I0); // mov pc, r0
- UML_ADD(block, R32(15), R32(15), 4); // add R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, mem(&sh2->pc), I0); // mov pc, r0
+ UML_ADD(block, R32(15), R32(15), 4); // add R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
- UML_ADD(block, R32(15), R32(15), 4); // add R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
+ UML_ADD(block, R32(15), R32(15), 4); // add R15, R15, #4
compiler->checkints = TRUE;
- UML_MOV(block, mem(&sh2->ea), mem(&sh2->pc)); // mov ea, pc
- generate_update_cycles(sh2, block, compiler, mem(&sh2->ea), TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, mem(&sh2->pc), *sh2->nocode); // and jump to the "resume PC"
+ UML_MOV(block, mem(&sh2->ea), mem(&sh2->pc)); // mov ea, pc
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->ea), TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, mem(&sh2->pc), *sh2->nocode); // and jump to the "resume PC"
return TRUE;
}
@@ -2067,8 +2067,8 @@ static int generate_group_2(sh2_state *sh2, drcuml_block *block, compiler_state
switch (opcode & 15)
{
case 0: // MOVBS(Rm, Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_AND(block, I1, R32(Rm), 0xff); // and r1, Rm, 0xff
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_AND(block, I1, R32(Rm), 0xff); // and r1, Rm, 0xff
UML_CALLH(block, *sh2->write8);
if (!in_delay_slot)
@@ -2076,8 +2076,8 @@ static int generate_group_2(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 1: // MOVWS(Rm, Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_AND(block, I1, R32(Rm), 0xffff); // and r1, Rm, 0xffff
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_AND(block, I1, R32(Rm), 0xffff); // and r1, Rm, 0xffff
UML_CALLH(block, *sh2->write16);
if (!in_delay_slot)
@@ -2085,8 +2085,8 @@ static int generate_group_2(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 2: // MOVLS(Rm, Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
UML_CALLH(block, *sh2->write32);
if (!in_delay_slot)
@@ -2097,134 +2097,134 @@ static int generate_group_2(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 4: // MOVBM(Rm, Rn);
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
- UML_SUB(block, R32(Rn), R32(Rn), 1); // sub Rn, Rn, 1
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_CALLH(block, *sh2->write8); // call write8
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ UML_SUB(block, R32(Rn), R32(Rn), 1); // sub Rn, Rn, 1
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_CALLH(block, *sh2->write8); // call write8
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 5: // MOVWM(Rm, Rn);
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
- UML_SUB(block, R32(Rn), R32(Rn), 2); // sub Rn, Rn, 2
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_CALLH(block, *sh2->write16); // call write16
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ UML_SUB(block, R32(Rn), R32(Rn), 2); // sub Rn, Rn, 2
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_CALLH(block, *sh2->write16); // call write16
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 6: // MOVLM(Rm, Rn);
- UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_MOV(block, I1, R32(Rm)); // mov r1, Rm
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 13: // XTRCT(Rm, Rn);
- UML_SHL(block, I0, R32(Rm), 16); // shl r0, Rm, #16
- UML_AND(block, I0, I0, 0xffff0000); // and r0, r0, #0xffff0000
+ UML_SHL(block, I0, R32(Rm), 16); // shl r0, Rm, #16
+ UML_AND(block, I0, I0, 0xffff0000); // and r0, r0, #0xffff0000
- UML_SHR(block, I1, R32(Rn), 16); // shr, r1, Rn, #16
- UML_AND(block, I1, I1, 0xffff); // and r1, r1, #0x0000ffff
+ UML_SHR(block, I1, R32(Rn), 16); // shr, r1, Rn, #16
+ UML_AND(block, I1, I1, 0xffff); // and r1, r1, #0x0000ffff
- UML_OR(block, R32(Rn), I0, I1); // or Rn, r0, r1
+ UML_OR(block, R32(Rn), I0, I1); // or Rn, r0, r1
return TRUE;
case 7: // DIV0S(Rm, Rn);
- UML_MOV(block, I0, mem(&sh2->sr)); // move r0, sr
- UML_AND(block, I0, I0, ~(Q|M|T)); // and r0, r0, ~(Q|M|T) (clear the Q,M, and T bits)
+ UML_MOV(block, I0, mem(&sh2->sr)); // move r0, sr
+ UML_AND(block, I0, I0, ~(Q|M|T)); // and r0, r0, ~(Q|M|T) (clear the Q,M, and T bits)
- UML_TEST(block, R32(Rn), 0x80000000); // test Rn, #0x80000000
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
+ UML_TEST(block, R32(Rn), 0x80000000); // test Rn, #0x80000000
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
- UML_OR(block, I0, I0, Q); // or r0, r0, Q
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_OR(block, I0, I0, Q); // or r0, r0, Q
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_TEST(block, R32(Rm), 0x80000000); // test Rm, #0x80000000
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
+ UML_TEST(block, R32(Rm), 0x80000000); // test Rm, #0x80000000
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
- UML_OR(block, I0, I0, M); // or r0, r0, M
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_OR(block, I0, I0, M); // or r0, r0, M
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_XOR(block, I1, R32(Rn), R32(Rm)); // xor r1, Rn, Rm
- UML_TEST(block, I1, 0x80000000); // test r1, #0x80000000
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
+ UML_XOR(block, I1, R32(Rn), R32(Rm)); // xor r1, Rn, Rm
+ UML_TEST(block, I1, 0x80000000); // test r1, #0x80000000
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz labelnum
- UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
+ UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
return TRUE;
case 8: // TST(Rm, Rn);
- UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
- UML_TEST(block, R32(Rm), R32(Rn)); // test Rm, Rn
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
+ UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_TEST(block, R32(Rm), R32(Rn)); // test Rm, Rn
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
- UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // desc->pc:
+ UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_LABEL(block, compiler->labelnum++); // desc->pc:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
return TRUE;
case 12: // CMPSTR(Rm, Rn);
- UML_XOR(block, I0, R32(Rn), R32(Rm)); // xor r0, Rn, Rm (temp)
+ UML_XOR(block, I0, R32(Rn), R32(Rm)); // xor r0, Rn, Rm (temp)
- UML_SHR(block, I1, I0, 24); // shr r1, r0, #24 (HH)
- UML_AND(block, I1, I1, 0xff); // and r1, r1, #0xff
+ UML_SHR(block, I1, I0, 24); // shr r1, r0, #24 (HH)
+ UML_AND(block, I1, I1, 0xff); // and r1, r1, #0xff
- UML_SHR(block, I2, I0, 16); // shr r2, r0, #16 (HL)
- UML_AND(block, I2, I2, 0xff); // and r2, r2, #0xff
+ UML_SHR(block, I2, I0, 16); // shr r2, r0, #16 (HL)
+ UML_AND(block, I2, I2, 0xff); // and r2, r2, #0xff
- UML_SHR(block, I3, I0, 8); // shr r3, r0, #8 (LH)
- UML_AND(block, I3, I3, 0xff); // and r3, r3, #0xff
+ UML_SHR(block, I3, I0, 8); // shr r3, r0, #8 (LH)
+ UML_AND(block, I3, I3, 0xff); // and r3, r3, #0xff
- UML_AND(block, I7, I0, 0xff); // and r7, r0, #0xff (LL)
+ UML_AND(block, I7, I0, 0xff); // and r7, r0, #0xff (LL)
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
- UML_CMP(block, I1, 0); // cmp r1, #0
- UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
- UML_CMP(block, I2, 0); // cmp r2, #0
- UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
- UML_CMP(block, I3, 0); // cmp r3, #0
- UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
- UML_CMP(block, I7, 0); // cmp r7, #0
- UML_JMPc(block, COND_NZ, compiler->labelnum+1); // jnz labelnum
+ UML_CMP(block, I1, 0); // cmp r1, #0
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
+ UML_CMP(block, I2, 0); // cmp r2, #0
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
+ UML_CMP(block, I3, 0); // cmp r3, #0
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jnz labelnum
+ UML_CMP(block, I7, 0); // cmp r7, #0
+ UML_JMPc(block, COND_NZ, compiler->labelnum+1); // jnz labelnum
- UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
- UML_LABEL(block, compiler->labelnum++); // labelnum+1:
+ UML_LABEL(block, compiler->labelnum++); // labelnum+1:
return TRUE;
case 9: // AND(Rm, Rn);
- UML_AND(block, R32(Rn), R32(Rn), R32(Rm)); // and Rn, Rn, Rm
+ UML_AND(block, R32(Rn), R32(Rn), R32(Rm)); // and Rn, Rn, Rm
return TRUE;
case 10: // XOR(Rm, Rn);
- UML_XOR(block, R32(Rn), R32(Rn), R32(Rm)); // xor Rn, Rn, Rm
+ UML_XOR(block, R32(Rn), R32(Rn), R32(Rm)); // xor Rn, Rn, Rm
return TRUE;
case 11: // OR(Rm, Rn);
- UML_OR(block, R32(Rn), R32(Rn), R32(Rm)); // or Rn, Rn, Rm
+ UML_OR(block, R32(Rn), R32(Rn), R32(Rm)); // or Rn, Rn, Rm
return TRUE;
case 14: // MULU(Rm, Rn);
- UML_AND(block, I0, R32(Rm), 0xffff); // and r0, Rm, 0xffff
- UML_AND(block, I1, R32(Rn), 0xffff); // and r1, Rn, 0xffff
- UML_MULU(block, mem(&sh2->macl), mem(&sh2->ea), I0, I1); // mulu macl, ea, r0, r1
+ UML_AND(block, I0, R32(Rm), 0xffff); // and r0, Rm, 0xffff
+ UML_AND(block, I1, R32(Rn), 0xffff); // and r1, Rn, 0xffff
+ UML_MULU(block, mem(&sh2->macl), mem(&sh2->ea), I0, I1); // mulu macl, ea, r0, r1
return TRUE;
case 15: // MULS(Rm, Rn);
- UML_SEXT(block, I0, R32(Rm), SIZE_WORD); // sext r0, Rm
- UML_SEXT(block, I1, R32(Rn), SIZE_WORD); // sext r1, Rn
- UML_MULS(block, mem(&sh2->macl), mem(&sh2->ea), I0, I1); // muls macl, ea, r0, r1
+ UML_SEXT(block, I0, R32(Rm), SIZE_WORD); // sext r0, Rm
+ UML_SEXT(block, I1, R32(Rn), SIZE_WORD); // sext r1, Rn
+ UML_MULS(block, mem(&sh2->macl), mem(&sh2->ea), I0, I1); // muls macl, ea, r0, r1
return TRUE;
}
@@ -2236,33 +2236,33 @@ static int generate_group_3(sh2_state *sh2, drcuml_block *block, compiler_state
switch (opcode & 15)
{
case 0: // CMPEQ(Rm, Rn); (equality)
- UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
- UML_SETc(block, COND_E, I0); // set E, r0
- UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
+ UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
+ UML_SETc(block, COND_E, I0); // set E, r0
+ UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
return TRUE;
case 2: // CMPHS(Rm, Rn); (unsigned greater than or equal)
- UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
- UML_SETc(block, COND_AE, I0); // set AE, r0
- UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
+ UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
+ UML_SETc(block, COND_AE, I0); // set AE, r0
+ UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
return TRUE;
case 3: // CMPGE(Rm, Rn); (signed greater than or equal)
- UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
- UML_SETc(block, COND_GE, I0); // set GE, r0
- UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
+ UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
+ UML_SETc(block, COND_GE, I0); // set GE, r0
+ UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
return TRUE;
case 6: // CMPHI(Rm, Rn); (unsigned greater than)
- UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
- UML_SETc(block, COND_A, I0); // set A, r0
- UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
+ UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
+ UML_SETc(block, COND_A, I0); // set A, r0
+ UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
return TRUE;
case 7: // CMPGT(Rm, Rn); (signed greater than)
- UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
- UML_SETc(block, COND_G, I0); // set G, r0
- UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
+ UML_CMP(block, R32(Rn), R32(Rm)); // cmp Rn, Rm
+ UML_SETc(block, COND_G, I0); // set G, r0
+ UML_ROLINS(block, mem(&sh2->sr), I0, 0, 1); // rolins sr, r0, 0, 1
return TRUE;
case 1: // NOP();
@@ -2293,24 +2293,24 @@ static int generate_group_3(sh2_state *sh2, drcuml_block *block, compiler_state
break;
case 8: // SUB(Rm, Rn);
- UML_SUB(block, R32(Rn), R32(Rn), R32(Rm)); // sub Rn, Rn, Rm
+ UML_SUB(block, R32(Rn), R32(Rn), R32(Rm)); // sub Rn, Rn, Rm
return TRUE;
case 12: // ADD(Rm, Rn);
- UML_ADD(block, R32(Rn), R32(Rn), R32(Rm)); // add Rn, Rn, Rm
+ UML_ADD(block, R32(Rn), R32(Rn), R32(Rm)); // add Rn, Rn, Rm
return TRUE;
case 10: // SUBC(Rm, Rn);
- UML_CARRY(block, mem(&sh2->sr), 0); // carry = T (T is bit 0 of SR)
- UML_SUBB(block, R32(Rn), R32(Rn), R32(Rm)); // addc Rn, Rn, Rm
- UML_SETc(block, COND_C, I0); // setc i0, C
+ UML_CARRY(block, mem(&sh2->sr), 0); // carry = T (T is bit 0 of SR)
+ UML_SUBB(block, R32(Rn), R32(Rn), R32(Rm)); // addc Rn, Rn, Rm
+ UML_SETc(block, COND_C, I0); // setc i0, C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins sr,i0,0,T
return TRUE;
case 11: // SUBV(Rm, Rn);
#if ADDSUBV_DIRECT
- UML_SUB(block, R32(Rn), R32(Rn), R32(Rm)); // sub Rn, Rn, Rm
- UML_SETc(block, COND_V, I0); // setc i0, V
+ UML_SUB(block, R32(Rn), R32(Rn), R32(Rm)); // sub Rn, Rn, Rm
+ UML_SETc(block, COND_V, I0); // setc i0, V
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
#else
save_fast_iregs(sh2, block);
@@ -2321,16 +2321,16 @@ static int generate_group_3(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 14: // ADDC(Rm, Rn);
- UML_CARRY(block, mem(&sh2->sr), 0); // carry = T (T is bit 0 of SR)
- UML_ADDC(block, R32(Rn), R32(Rn), R32(Rm)); // addc Rn, Rn, Rm
- UML_SETc(block, COND_C, I0); // setc i0, C
+ UML_CARRY(block, mem(&sh2->sr), 0); // carry = T (T is bit 0 of SR)
+ UML_ADDC(block, R32(Rn), R32(Rn), R32(Rm)); // addc Rn, Rn, Rm
+ UML_SETc(block, COND_C, I0); // setc i0, C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins sr,i0,0,T
return TRUE;
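
ADDC and SUBC above seed the UML carry flag from T, perform the 32-bit add/subtract with carry, and write the resulting carry back into T. A plain-C sketch of the ADDC case, using a 64-bit intermediate to recover the carry; the helper name is illustrative and T is assumed to be bit 0 of SR:

#include <stdint.h>

static void sh2_addc_sketch(uint32_t *rn, uint32_t rm, uint32_t *sr)
{
    uint64_t sum = (uint64_t)*rn + rm + (*sr & 1u);   /* carry in  = T     */
    *rn = (uint32_t)sum;                              /* Rn = Rn + Rm + T  */
    *sr = (*sr & ~1u) | (uint32_t)(sum >> 32);        /* carry out -> T    */
}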
case 15: // ADDV(Rm, Rn);
#if ADDSUBV_DIRECT
- UML_ADD(block, R32(Rn), R32(Rn), R32(Rm)); // add Rn, Rn, Rm
- UML_SETc(block, COND_V, I0); // setc i0, V
+ UML_ADD(block, R32(Rn), R32(Rn), R32(Rm)); // add Rn, Rn, Rm
+ UML_SETc(block, COND_V, I0); // setc i0, V
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
#else
save_fast_iregs(sh2, block);
@@ -2348,68 +2348,68 @@ static int generate_group_4(sh2_state *sh2, drcuml_block *block, compiler_state
switch (opcode & 0x3F)
{
case 0x00: // SHLL(Rn);
- UML_SHL(block, R32(Rn), R32(Rn), 1); // shl Rn, Rn, 1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_SHL(block, R32(Rn), R32(Rn), 1); // shl Rn, Rn, 1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
return TRUE;
case 0x01: // SHLR(Rn);
- UML_SHR(block, R32(Rn), R32(Rn), 1); // shr Rn, Rn, 1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_SHR(block, R32(Rn), R32(Rn), 1); // shr Rn, Rn, 1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
return TRUE;
case 0x04: // ROTL(Rn);
- UML_ROL(block, R32(Rn), R32(Rn), 1); // rol Rn, Rn, 1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_ROL(block, R32(Rn), R32(Rn), 1); // rol Rn, Rn, 1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
return TRUE;
case 0x05: // ROTR(Rn);
- UML_ROR(block, R32(Rn), R32(Rn), 1); // ror Rn, Rn, 1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_ROR(block, R32(Rn), R32(Rn), 1); // ror Rn, Rn, 1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins [sr],i0,0,T
return TRUE;
case 0x02: // STSMMACH(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_MOV(block, I1, mem(&sh2->mach)); // mov r1, mach
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I1, mem(&sh2->mach)); // mov r1, mach
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x03: // STCMSR(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_MOV(block, I1, mem(&sh2->sr)); // mov r1, sr
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I1, mem(&sh2->sr)); // mov r1, sr
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x06: // LDSMMACH(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
- UML_MOV(block, mem(&sh2->mach), I0); // mov mach, r0
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
+ UML_MOV(block, mem(&sh2->mach), I0); // mov mach, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x07: // LDCMSR(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
- UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sr, r0
compiler->checkints = TRUE;
if (!in_delay_slot)
@@ -2442,23 +2442,23 @@ static int generate_group_4(sh2_state *sh2, drcuml_block *block, compiler_state
return TRUE;
case 0x0a: // LDSMACH(Rn);
- UML_MOV(block, mem(&sh2->mach), R32(Rn)); // mov mach, Rn
+ UML_MOV(block, mem(&sh2->mach), R32(Rn)); // mov mach, Rn
return TRUE;
case 0x0b: // JSR(Rn);
- UML_MOV(block, mem(&sh2->target), R32(Rn)); // mov target, Rn
+ UML_MOV(block, mem(&sh2->target), R32(Rn)); // mov target, Rn
- UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
+ UML_ADD(block, mem(&sh2->pr), desc->pc, 4); // add sh2->pr, desc->pc, #4 (skip the current insn & delay slot)
generate_delay_slot(sh2, block, compiler, desc, sh2->target-4);
- generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // and do the jump
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // and do the jump
return TRUE;
case 0x0e: // LDCSR(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_AND(block, I0, I0, FLAGS); // and r0, r0, FLAGS
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_AND(block, I0, I0, FLAGS); // and r0, r0, FLAGS
UML_MOV(block, mem(&sh2->sr), I0);
compiler->checkints = TRUE;
@@ -2477,207 +2477,207 @@ static int generate_group_4(sh2_state *sh2, drcuml_block *block, compiler_state
case 0x10: // DT(Rn);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
- UML_SUB(block, R32(Rn), R32(Rn), 1); // sub Rn, Rn, 1
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jz compiler->labelnum
+ UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_SUB(block, R32(Rn), R32(Rn), 1); // sub Rn, Rn, 1
+           UML_JMPc(block, COND_NZ, compiler->labelnum);           // jnz compiler->labelnum
- UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // desc->pc:
+ UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_LABEL(block, compiler->labelnum++); // desc->pc:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
return TRUE;
}
break;
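
DT above is SH-2 only (hence the cpu_type check): it decrements Rn and sets T when the result reaches zero, which is what the sub/conditional-jump/label sequence builds. A one-line equivalent in plain C, helper name illustrative:

#include <stdint.h>

static void sh2_dt_sketch(uint32_t *rn, uint32_t *sr)
{
    *rn -= 1;
    *sr = (*sr & ~1u) | (*rn == 0 ? 1u : 0u);   /* T = (Rn == 0) */
}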
case 0x11: // CMPPZ(Rn);
- UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
- UML_CMP(block, R32(Rn), 0); // cmp Rn, 0
- UML_JMPc(block, COND_S, compiler->labelnum); // js compiler->labelnum (if negative)
+ UML_CMP(block, R32(Rn), 0); // cmp Rn, 0
+ UML_JMPc(block, COND_S, compiler->labelnum); // js compiler->labelnum (if negative)
- UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // desc->pc:
+ UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_LABEL(block, compiler->labelnum++); // desc->pc:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
return TRUE;
case 0x15: // CMPPL(Rn);
- UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
- UML_CMP(block, R32(Rn), 0); // cmp Rn, 0
+ UML_CMP(block, R32(Rn), 0); // cmp Rn, 0
- UML_JMPc(block, COND_S, compiler->labelnum); // js compiler->labelnum (if negative)
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum (if zero)
+ UML_JMPc(block, COND_S, compiler->labelnum); // js compiler->labelnum (if negative)
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum (if zero)
- UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // desc->pc:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
+ UML_LABEL(block, compiler->labelnum++); // desc->pc:
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
return TRUE;
case 0x12: // STSMMACL(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_MOV(block, I1, mem(&sh2->macl)); // mov r1, macl
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I1, mem(&sh2->macl)); // mov r1, macl
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x13: // STCMGBR(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_MOV(block, I1, mem(&sh2->gbr)); // mov r1, gbr
- SETEA(0); // set ea for debug
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, #4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I1, mem(&sh2->gbr)); // mov r1, gbr
+ SETEA(0); // set ea for debug
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x16: // LDSMMACL(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
- UML_MOV(block, mem(&sh2->macl), I0); // mov macl, r0
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
+ UML_MOV(block, mem(&sh2->macl), I0); // mov macl, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x17: // LDCMGBR(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
- UML_MOV(block, mem(&sh2->gbr), I0); // mov gbr, r0
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, #4
+ UML_MOV(block, mem(&sh2->gbr), I0); // mov gbr, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x1a: // LDSMACL(Rn);
- UML_MOV(block, mem(&sh2->macl), R32(Rn)); // mov macl, Rn
+ UML_MOV(block, mem(&sh2->macl), R32(Rn)); // mov macl, Rn
return TRUE;
case 0x1b: // TAS(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read8); // call read8
+ UML_CALLH(block, *sh2->read8); // call read8
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
- UML_CMP(block, I0, 0); // cmp r0, #0
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
+ UML_CMP(block, I0, 0); // cmp r0, #0
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_OR(block, I1, I0, 0x80); // or r1, r0, #0x80
+ UML_OR(block, I1, I0, 0x80); // or r1, r0, #0x80
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
- UML_CALLH(block, *sh2->write8); // write the value back
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_CALLH(block, *sh2->write8); // write the value back
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
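
TAS above is the classic test-and-set read-modify-write: read a byte from (Rn), set T if it was zero, then write it back with bit 7 forced on. A sketch of the same sequence in plain C; read8()/write8() stand in for the memory handlers the generated code calls and are assumptions here:

#include <stdint.h>

extern uint8_t read8(uint32_t addr);              /* assumed handler */
extern void    write8(uint32_t addr, uint8_t data);

static void sh2_tas_sketch(uint32_t rn, uint32_t *sr)
{
    uint8_t v = read8(rn);
    *sr = (*sr & ~1u) | (v == 0 ? 1u : 0u);       /* T = (old value == 0)  */
    write8(rn, v | 0x80);                         /* set bit 7, write back */
}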
case 0x1e: // LDCGBR(Rn);
- UML_MOV(block, mem(&sh2->gbr), R32(Rn)); // mov gbr, Rn
+ UML_MOV(block, mem(&sh2->gbr), R32(Rn)); // mov gbr, Rn
return TRUE;
case 0x20: // SHAL(Rn);
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
- UML_SHR(block, I0, R32(Rn), 31); // shr r0, Rn, 31
- UML_AND(block, I0, I0, T); // and r0, r0, T
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), I0); // or sr, sr, r0
- UML_SHL(block, R32(Rn), R32(Rn), 1); // shl Rn, Rn, 1
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
+ UML_SHR(block, I0, R32(Rn), 31); // shr r0, Rn, 31
+ UML_AND(block, I0, I0, T); // and r0, r0, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), I0); // or sr, sr, r0
+ UML_SHL(block, R32(Rn), R32(Rn), 1); // shl Rn, Rn, 1
return TRUE;
case 0x21: // SHAR(Rn);
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
- UML_AND(block, I0, R32(Rn), T); // and r0, Rn, T
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), I0); // or sr, sr, r0
- UML_SAR(block, R32(Rn), R32(Rn), 1); // sar Rn, Rn, 1
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T
+ UML_AND(block, I0, R32(Rn), T); // and r0, Rn, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), I0); // or sr, sr, r0
+ UML_SAR(block, R32(Rn), R32(Rn), 1); // sar Rn, Rn, 1
return TRUE;
case 0x22: // STSMPR(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_MOV(block, I1, mem(&sh2->pr)); // mov r1, pr
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_MOV(block, I1, mem(&sh2->pr)); // mov r1, pr
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x23: // STCMVBR(Rn);
- UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_SUB(block, R32(Rn), R32(Rn), 4); // sub Rn, Rn, 4
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_MOV(block, I1, mem(&sh2->vbr)); // mov r1, vbr
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_MOV(block, I1, mem(&sh2->vbr)); // mov r1, vbr
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x24: // ROTCL(Rn);
- UML_CARRY(block, mem(&sh2->sr), 0); // carry sr,0
- UML_ROLC(block, R32(Rn), R32(Rn), 1); // rolc Rn,Rn,1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_CARRY(block, mem(&sh2->sr), 0); // carry sr,0
+ UML_ROLC(block, R32(Rn), R32(Rn), 1); // rolc Rn,Rn,1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins sr,i0,0,T
return TRUE;
case 0x25: // ROTCR(Rn);
- UML_CARRY(block, mem(&sh2->sr), 0); // carry sr,0
- UML_RORC(block, R32(Rn), R32(Rn), 1); // rorc Rn,Rn,1
- UML_SETc(block, COND_C, I0); // set i0,C
+ UML_CARRY(block, mem(&sh2->sr), 0); // carry sr,0
+ UML_RORC(block, R32(Rn), R32(Rn), 1); // rorc Rn,Rn,1
+ UML_SETc(block, COND_C, I0); // set i0,C
UML_ROLINS(block, mem(&sh2->sr), I0, 0, T); // rolins sr,i0,0,T
return TRUE;
case 0x26: // LDSMPR(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, mem(&sh2->pr), I0); // mov sh2->pr, r0
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, Rn, #4
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, mem(&sh2->pr), I0); // mov sh2->pr, r0
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, Rn, #4
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x27: // LDCMVBR(Rn);
- UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
+ UML_MOV(block, I0, R32(Rn)); // mov r0, Rn
SETEA(0);
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, mem(&sh2->vbr), I0); // mov sh2->vbr, r0
- UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, Rn, #4
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, mem(&sh2->vbr), I0); // mov sh2->vbr, r0
+ UML_ADD(block, R32(Rn), R32(Rn), 4); // add Rn, Rn, #4
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 0x2a: // LDSPR(Rn);
- UML_MOV(block, mem(&sh2->pr), R32(Rn)); // mov sh2->pr, Rn
+ UML_MOV(block, mem(&sh2->pr), R32(Rn)); // mov sh2->pr, Rn
return TRUE;
case 0x2b: // JMP(Rn);
- UML_MOV(block, mem(&sh2->target), R32(Rn)); // mov target, Rn
+ UML_MOV(block, mem(&sh2->target), R32(Rn)); // mov target, Rn
generate_delay_slot(sh2, block, compiler, desc, sh2->target);
- generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp (target)
+ generate_update_cycles(sh2, block, compiler, mem(&sh2->target), TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, mem(&sh2->target), *sh2->nocode); // jmp (target)
return TRUE;
case 0x2e: // LDCVBR(Rn);
- UML_MOV(block, mem(&sh2->vbr), R32(Rn)); // mov vbr, Rn
+ UML_MOV(block, mem(&sh2->vbr), R32(Rn)); // mov vbr, Rn
return TRUE;
case 0x0c: // NOP();
@@ -2713,124 +2713,124 @@ static int generate_group_6(sh2_state *sh2, drcuml_block *block, compiler_state
switch (opcode & 15)
{
case 0: // MOVBL(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- SETEA(0); // debug: ea = r0
- UML_CALLH(block, *sh2->read8); // call read8
- UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ SETEA(0); // debug: ea = r0
+ UML_CALLH(block, *sh2->read8); // call read8
+ UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 1: // MOVWL(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- SETEA(0); // debug: ea = r0
- UML_CALLH(block, *sh2->read16); // call read16
- UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ SETEA(0); // debug: ea = r0
+ UML_CALLH(block, *sh2->read16); // call read16
+ UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 2: // MOVLL(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- SETEA(0); // debug: ea = r0
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, R32(Rn), I0); // mov Rn, r0
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ SETEA(0); // debug: ea = r0
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, R32(Rn), I0); // mov Rn, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 3: // MOV(Rm, Rn);
- UML_MOV(block, R32(Rn), R32(Rm)); // mov Rn, Rm
+ UML_MOV(block, R32(Rn), R32(Rm)); // mov Rn, Rm
return TRUE;
case 7: // NOT(Rm, Rn);
- UML_XOR(block, R32(Rn), R32(Rm), 0xffffffff); // xor Rn, Rm, 0xffffffff
+ UML_XOR(block, R32(Rn), R32(Rm), 0xffffffff); // xor Rn, Rm, 0xffffffff
return TRUE;
case 9: // SWAPW(Rm, Rn);
- UML_ROL(block, R32(Rn), R32(Rm), 16); // rol Rn, Rm, 16
+ UML_ROL(block, R32(Rn), R32(Rm), 16); // rol Rn, Rm, 16
return TRUE;
case 11: // NEG(Rm, Rn);
- UML_SUB(block, R32(Rn), 0, R32(Rm)); // sub Rn, 0, Rm
+ UML_SUB(block, R32(Rn), 0, R32(Rm)); // sub Rn, 0, Rm
return TRUE;
case 12: // EXTUB(Rm, Rn);
- UML_AND(block, R32(Rn), R32(Rm), 0x000000ff); // and Rn, Rm, 0xff
+ UML_AND(block, R32(Rn), R32(Rm), 0x000000ff); // and Rn, Rm, 0xff
return TRUE;
case 13: // EXTUW(Rm, Rn);
- UML_AND(block, R32(Rn), R32(Rm), 0x0000ffff); // and Rn, Rm, 0xffff
+ UML_AND(block, R32(Rn), R32(Rm), 0x0000ffff); // and Rn, Rm, 0xffff
return TRUE;
case 14: // EXTSB(Rm, Rn);
- UML_SEXT(block, R32(Rn), R32(Rm), SIZE_BYTE); // sext Rn, Rm, BYTE
+ UML_SEXT(block, R32(Rn), R32(Rm), SIZE_BYTE); // sext Rn, Rm, BYTE
return TRUE;
case 15: // EXTSW(Rm, Rn);
- UML_SEXT(block, R32(Rn), R32(Rm), SIZE_WORD); // sext Rn, Rm, WORD
+ UML_SEXT(block, R32(Rn), R32(Rm), SIZE_WORD); // sext Rn, Rm, WORD
return TRUE;
case 4: // MOVBP(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- UML_CALLH(block, *sh2->read8); // call read8
- UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ UML_CALLH(block, *sh2->read8); // call read8
+ UML_SEXT(block, R32(Rn), I0, SIZE_BYTE); // sext Rn, r0, BYTE
if (Rm != Rn)
- UML_ADD(block, R32(Rm), R32(Rm), 1); // add Rm, Rm, #1
+ UML_ADD(block, R32(Rm), R32(Rm), 1); // add Rm, Rm, #1
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 5: // MOVWP(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- UML_CALLH(block, *sh2->read16); // call read16
- UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ UML_CALLH(block, *sh2->read16); // call read16
+ UML_SEXT(block, R32(Rn), I0, SIZE_WORD); // sext Rn, r0, WORD
if (Rm != Rn)
- UML_ADD(block, R32(Rm), R32(Rm), 2); // add Rm, Rm, #2
+ UML_ADD(block, R32(Rm), R32(Rm), 2); // add Rm, Rm, #2
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 6: // MOVLP(Rm, Rn);
- UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, R32(Rn), I0); // mov Rn, r0
+ UML_MOV(block, I0, R32(Rm)); // mov r0, Rm
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, R32(Rn), I0); // mov Rn, r0
if (Rm != Rn)
- UML_ADD(block, R32(Rm), R32(Rm), 4); // add Rm, Rm, #4
+ UML_ADD(block, R32(Rm), R32(Rm), 4); // add Rm, Rm, #4
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 8: // SWAPB(Rm, Rn);
- UML_AND(block, I0, R32(Rm), 0xffff0000); // and r0, Rm, #0xffff0000
- UML_AND(block, I1, R32(Rm), 0x000000ff); // and r0, Rm, #0x000000ff
- UML_AND(block, I2, R32(Rm), 0x0000ff00); // and r0, Rm, #0x0000ff00
- UML_SHL(block, I1, I1, 8); // shl r1, r1, #8
- UML_SHR(block, I2, I2, 8); // shr r2, r2, #8
- UML_OR(block, I0, I0, I1); // or r0, r0, r1
- UML_OR(block, R32(Rn), I0, I2); // or Rn, r0, r2
+ UML_AND(block, I0, R32(Rm), 0xffff0000); // and r0, Rm, #0xffff0000
+           UML_AND(block, I1, R32(Rm), 0x000000ff);                // and r1, Rm, #0x000000ff
+           UML_AND(block, I2, R32(Rm), 0x0000ff00);                // and r2, Rm, #0x0000ff00
+ UML_SHL(block, I1, I1, 8); // shl r1, r1, #8
+ UML_SHR(block, I2, I2, 8); // shr r2, r2, #8
+ UML_OR(block, I0, I0, I1); // or r0, r0, r1
+ UML_OR(block, R32(Rn), I0, I2); // or Rn, r0, r2
return TRUE;
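
SWAPB above keeps the upper 16 bits of Rm and exchanges the two low bytes; the AND/SHL/SHR/OR chain assembles exactly that in i0..i2. The same computation in one C expression (illustrative helper):

#include <stdint.h>

static uint32_t sh2_swapb_sketch(uint32_t rm)
{
    return (rm & 0xffff0000u)              /* upper half preserved   */
         | ((rm & 0x000000ffu) << 8)       /* low byte moves up      */
         | ((rm & 0x0000ff00u) >> 8);      /* second byte moves down */
}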
case 10: // NEGC(Rm, Rn);
- UML_MOV(block, I0, mem(&sh2->sr)); // mov r0, sr (save SR)
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
- UML_CARRY(block, I0, 0); // carry = T (T is bit 0 of SR)
- UML_SUBB(block, R32(Rn), 0, R32(Rm)); // subb Rn, #0, Rm
+ UML_MOV(block, I0, mem(&sh2->sr)); // mov r0, sr (save SR)
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
+ UML_CARRY(block, I0, 0); // carry = T (T is bit 0 of SR)
+ UML_SUBB(block, R32(Rn), 0, R32(Rm)); // subb Rn, #0, Rm
- UML_JMPc(block, COND_NC, compiler->labelnum); // jnc labelnum
+ UML_JMPc(block, COND_NC, compiler->labelnum); // jnc labelnum
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
return TRUE;
}
@@ -2848,9 +2848,9 @@ static int generate_group_8(sh2_state *sh2, drcuml_block *block, compiler_state
{
case 0 << 8: // MOVBS4(opcode & 0x0f, Rm);
udisp = (opcode & 0x0f);
- UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
- UML_MOV(block, I1, R32(0)); // mov r1, R0
- UML_CALLH(block, *sh2->write8); // call write8
+ UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
+ UML_MOV(block, I1, R32(0)); // mov r1, R0
+ UML_CALLH(block, *sh2->write8); // call write8
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -2858,9 +2858,9 @@ static int generate_group_8(sh2_state *sh2, drcuml_block *block, compiler_state
case 1 << 8: // MOVWS4(opcode & 0x0f, Rm);
udisp = (opcode & 0x0f) * 2;
- UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
- UML_MOV(block, I1, R32(0)); // mov r1, R0
- UML_CALLH(block, *sh2->write16); // call write16
+ UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
+ UML_MOV(block, I1, R32(0)); // mov r1, R0
+ UML_CALLH(block, *sh2->write16); // call write16
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -2877,10 +2877,10 @@ static int generate_group_8(sh2_state *sh2, drcuml_block *block, compiler_state
case 4<< 8: // MOVBL4(Rm, opcode & 0x0f);
udisp = opcode & 0x0f;
- UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
+ UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
SETEA(0);
- UML_CALLH(block, *sh2->read8); // call read8
- UML_SEXT(block, R32(0), I0, SIZE_BYTE); // sext R0, r0, BYTE
+ UML_CALLH(block, *sh2->read8); // call read8
+ UML_SEXT(block, R32(0), I0, SIZE_BYTE); // sext R0, r0, BYTE
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -2888,71 +2888,71 @@ static int generate_group_8(sh2_state *sh2, drcuml_block *block, compiler_state
case 5<< 8: // MOVWL4(Rm, opcode & 0x0f);
udisp = (opcode & 0x0f)*2;
- UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
+ UML_ADD(block, I0, R32(Rm), udisp); // add r0, Rm, udisp
SETEA(0);
- UML_CALLH(block, *sh2->read16); // call read16
- UML_SEXT(block, R32(0), I0, SIZE_WORD); // sext R0, r0, WORD
+ UML_CALLH(block, *sh2->read16); // call read16
+ UML_SEXT(block, R32(0), I0, SIZE_WORD); // sext R0, r0, WORD
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
return TRUE;
case 8<< 8: // CMPIM(opcode & 0xff);
- UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
+ UML_AND(block, I0, mem(&sh2->sr), ~T); // and r0, sr, ~T (clear the T bit)
- UML_SEXT(block, I1, opcode&0xff, SIZE_BYTE); // sext r1, opcode&0xff, BYTE
- UML_CMP(block, I1, R32(0)); // cmp r1, R0
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum (if negative)
+ UML_SEXT(block, I1, opcode&0xff, SIZE_BYTE); // sext r1, opcode&0xff, BYTE
+ UML_CMP(block, I1, R32(0)); // cmp r1, R0
+           UML_JMPc(block, COND_NZ, compiler->labelnum);           // jnz compiler->labelnum (if not equal)
- UML_OR(block, I0, I0, T); // or r0, r0, T
+ UML_OR(block, I0, I0, T); // or r0, r0, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
- UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_MOV(block, mem(&sh2->sr), I0); // mov sh2->sr, r0
return TRUE;
case 9<< 8: // BT(opcode & 0xff);
- UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum
+ UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum
disp = ((INT32)opcode << 24) >> 24;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
return TRUE;
case 11<< 8: // BF(opcode & 0xff);
- UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
+ UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
disp = ((INT32)opcode << 24) >> 24;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
return TRUE;
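
BT and BF above test T and, when the branch is taken, jump to a target precomputed at translation time as (pc + 2) + disp*2 + 2, i.e. pc + 4 plus the sign-extended 8-bit displacement doubled. A sketch of that target computation (illustrative helper):

#include <stdint.h>

static uint32_t sh2_bt_target_sketch(uint32_t pc, uint16_t opcode)
{
    int32_t disp = (int8_t)(opcode & 0xff);   /* sign-extend low 8 bits */
    return pc + 4 + (uint32_t)(disp * 2);
}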
case 13<< 8: // BTS(opcode & 0xff);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
- UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum
+ UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
+ UML_JMPc(block, COND_Z, compiler->labelnum); // jz compiler->labelnum
disp = ((INT32)opcode << 24) >> 24;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
- templabel = compiler->labelnum; // save our label
- compiler->labelnum++; // make sure the delay slot doesn't use it
+ templabel = compiler->labelnum; // save our label
+ compiler->labelnum++; // make sure the delay slot doesn't use it
generate_delay_slot(sh2, block, compiler, desc, sh2->ea-2);
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
- UML_LABEL(block, templabel); // labelnum:
+ UML_LABEL(block, templabel); // labelnum:
return TRUE;
}
break;
@@ -2960,20 +2960,20 @@ static int generate_group_8(sh2_state *sh2, drcuml_block *block, compiler_state
case 15<< 8: // BFS(opcode & 0xff);
if (sh2->cpu_type > CPU_TYPE_SH1)
{
- UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
+ UML_TEST(block, mem(&sh2->sr), T); // test sh2->sr, T
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz compiler->labelnum
disp = ((INT32)opcode << 24) >> 24;
- sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
+ sh2->ea = (desc->pc + 2) + disp * 2 + 2; // sh2->ea = destination
- templabel = compiler->labelnum; // save our label
- compiler->labelnum++; // make sure the delay slot doesn't use it
- generate_delay_slot(sh2, block, compiler, desc, sh2->ea-2); // delay slot only if the branch is taken
+ templabel = compiler->labelnum; // save our label
+ compiler->labelnum++; // make sure the delay slot doesn't use it
+ generate_delay_slot(sh2, block, compiler, desc, sh2->ea-2); // delay slot only if the branch is taken
- generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
- UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
+ generate_update_cycles(sh2, block, compiler, sh2->ea, TRUE); // <subtract cycles>
+ UML_HASHJMP(block, 0, sh2->ea, *sh2->nocode); // jmp sh2->ea
- UML_LABEL(block, templabel); // labelnum:
+ UML_LABEL(block, templabel); // labelnum:
return TRUE;
}
break;
@@ -2990,9 +2990,9 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
{
case 0<<8: // MOVBSG(opcode & 0xff);
scratch = (opcode & 0xff);
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_AND(block, I1, R32(0), 0xff); // and r1, R0, 0xff
- UML_CALLH(block, *sh2->write8); // call write8
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+ UML_AND(block, I1, R32(0), 0xff); // and r1, R0, 0xff
+ UML_CALLH(block, *sh2->write8); // call write8
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3000,9 +3000,9 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
case 1<<8: // MOVWSG(opcode & 0xff);
scratch = (opcode & 0xff) * 2;
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_AND(block, I1, R32(0), 0xffff); // and r1, R0, 0xffff
- UML_CALLH(block, *sh2->write16); // call write16
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+ UML_AND(block, I1, R32(0), 0xffff); // and r1, R0, 0xffff
+ UML_CALLH(block, *sh2->write16); // call write16
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3010,9 +3010,9 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
case 2<<8: // MOVLSG(opcode & 0xff);
scratch = (opcode & 0xff) * 4;
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_MOV(block, I1, R32(0)); // mov r1, R0
- UML_CALLH(block, *sh2->write32); // call write32
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+ UML_MOV(block, I1, R32(0)); // mov r1, R0
+ UML_CALLH(block, *sh2->write32); // call write32
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3020,29 +3020,29 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
case 3<<8: // TRAPA(opcode & 0xff);
scratch = (opcode & 0xff) * 4;
- UML_ADD(block, mem(&sh2->ea), mem(&sh2->vbr), scratch); // add ea, vbr, scratch
+ UML_ADD(block, mem(&sh2->ea), mem(&sh2->vbr), scratch); // add ea, vbr, scratch
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, mem(&sh2->sr)); // mov r1, sr
- UML_CALLH(block, *sh2->write32); // write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, mem(&sh2->sr)); // mov r1, sr
+ UML_CALLH(block, *sh2->write32); // write32
- UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
- UML_MOV(block, I0, R32(15)); // mov r0, R15
- UML_MOV(block, I1, desc->pc+2); // mov r1, pc+2
- UML_CALLH(block, *sh2->write32); // write32
+ UML_SUB(block, R32(15), R32(15), 4); // sub R15, R15, #4
+ UML_MOV(block, I0, R32(15)); // mov r0, R15
+ UML_MOV(block, I1, desc->pc+2); // mov r1, pc+2
+ UML_CALLH(block, *sh2->write32); // write32
- UML_MOV(block, I0, mem(&sh2->ea)); // mov r0, ea
- UML_CALLH(block, *sh2->read32); // read32
- UML_HASHJMP(block, 0, I0, *sh2->nocode); // jmp (r0)
+ UML_MOV(block, I0, mem(&sh2->ea)); // mov r0, ea
+ UML_CALLH(block, *sh2->read32); // read32
+ UML_HASHJMP(block, 0, I0, *sh2->nocode); // jmp (r0)
return TRUE;
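
TRAPA above pushes SR and the return address (pc + 2) onto the stack through R15, then fetches the new PC from the vector at VBR + imm*4 and jumps there. A plain-C sketch of that sequence; read32()/write32() stand in for the DRC's memory handlers and are assumptions here:

#include <stdint.h>

extern uint32_t read32(uint32_t addr);            /* assumed handlers */
extern void     write32(uint32_t addr, uint32_t data);

static uint32_t sh2_trapa_sketch(uint32_t *r15, uint32_t sr,
                                 uint32_t pc, uint32_t vbr, uint32_t imm)
{
    *r15 -= 4; write32(*r15, sr);        /* push SR            */
    *r15 -= 4; write32(*r15, pc + 2);    /* push return PC     */
    return read32(vbr + imm * 4);        /* new PC from vector */
}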
case 4<<8: // MOVBLG(opcode & 0xff);
scratch = (opcode & 0xff);
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_CALLH(block, *sh2->read8); // call read16
- UML_SEXT(block, R32(0), I0, SIZE_BYTE); // sext R0, r0, BYTE
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+           UML_CALLH(block, *sh2->read8);                          // call read8
+ UML_SEXT(block, R32(0), I0, SIZE_BYTE); // sext R0, r0, BYTE
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3050,9 +3050,9 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
case 5<<8: // MOVWLG(opcode & 0xff);
scratch = (opcode & 0xff) * 2;
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_CALLH(block, *sh2->read16); // call read16
- UML_SEXT(block, R32(0), I0, SIZE_WORD); // sext R0, r0, WORD
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+ UML_CALLH(block, *sh2->read16); // call read16
+ UML_SEXT(block, R32(0), I0, SIZE_WORD); // sext R0, r0, WORD
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3060,9 +3060,9 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
case 6<<8: // MOVLLG(opcode & 0xff);
scratch = (opcode & 0xff) * 4;
- UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
- UML_CALLH(block, *sh2->read32); // call read32
- UML_MOV(block, R32(0), I0); // mov R0, r0
+ UML_ADD(block, I0, mem(&sh2->gbr), scratch); // add r0, gbr, scratch
+ UML_CALLH(block, *sh2->read32); // call read32
+ UML_MOV(block, R32(0), I0); // mov R0, r0
if (!in_delay_slot)
generate_update_cycles(sh2, block, compiler, desc->pc + 2, TRUE);
@@ -3072,76 +3072,76 @@ static int generate_group_12(sh2_state *sh2, drcuml_block *block, compiler_state
scratch = (opcode & 0xff) * 4;
scratch += ((desc->pc + 4) & ~3);
- UML_MOV(block, R32(0), scratch); // mov R0, scratch
+ UML_MOV(block, R32(0), scratch); // mov R0, scratch
return TRUE;
case 8<<8: // TSTI(opcode & 0xff);
scratch = opcode & 0xff;
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
- UML_AND(block, I0, R32(0), scratch); // and r0, R0, scratch
- UML_CMP(block, I0, 0); // cmp r0, #0
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
+ UML_AND(block, I0, R32(0), scratch); // and r0, R0, scratch
+ UML_CMP(block, I0, 0); // cmp r0, #0
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
return TRUE;
case 9<<8: // ANDI(opcode & 0xff);
- UML_AND(block, R32(0), R32(0), opcode & 0xff); // and r0, r0, opcode & 0xff
+ UML_AND(block, R32(0), R32(0), opcode & 0xff); // and r0, r0, opcode & 0xff
return TRUE;
case 10<<8: // XORI(opcode & 0xff);
- UML_XOR(block, R32(0), R32(0), opcode & 0xff); // xor r0, r0, opcode & 0xff
+ UML_XOR(block, R32(0), R32(0), opcode & 0xff); // xor r0, r0, opcode & 0xff
return TRUE;
case 11<<8: // ORI(opcode & 0xff);
- UML_OR(block, R32(0), R32(0), opcode & 0xff); // or r0, r0, opcode & 0xff
+ UML_OR(block, R32(0), R32(0), opcode & 0xff); // or r0, r0, opcode & 0xff
return TRUE;
case 12<<8: // TSTM(opcode & 0xff);
- UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
- UML_CALLH(block, *sh2->read8); // read8
+ UML_AND(block, mem(&sh2->sr), mem(&sh2->sr), ~T); // and sr, sr, ~T (clear the T bit)
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_CALLH(block, *sh2->read8); // read8
UML_AND(block, I0, I0, opcode & 0xff);
- UML_CMP(block, I0, 0); // cmp r0, #0
- UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
+ UML_CMP(block, I0, 0); // cmp r0, #0
+ UML_JMPc(block, COND_NZ, compiler->labelnum); // jnz labelnum
- UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
+ UML_OR(block, mem(&sh2->sr), mem(&sh2->sr), T); // or sr, sr, T
- UML_LABEL(block, compiler->labelnum++); // labelnum:
+ UML_LABEL(block, compiler->labelnum++); // labelnum:
return TRUE;
case 13<<8: // ANDM(opcode & 0xff);
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
- UML_CALLH(block, *sh2->read8); // read8
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_CALLH(block, *sh2->read8); // read8
- UML_AND(block, I1, I0, opcode&0xff); // and r1, r0, #opcode&0xff
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_AND(block, I1, I0, opcode&0xff); // and r1, r0, #opcode&0xff
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
SETEA(0);
- UML_CALLH(block, *sh2->write8); // write8
+ UML_CALLH(block, *sh2->write8); // write8
return TRUE;
case 14<<8: // XORM(opcode & 0xff);
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
- UML_CALLH(block, *sh2->read8); // read8
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_CALLH(block, *sh2->read8); // read8
- UML_XOR(block, I1, I0, opcode&0xff); // xor r1, r0, #opcode&0xff
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_XOR(block, I1, I0, opcode&0xff); // xor r1, r0, #opcode&0xff
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
SETEA(0);
- UML_CALLH(block, *sh2->write8); // write8
+ UML_CALLH(block, *sh2->write8); // write8
return TRUE;
case 15<<8: // ORM(opcode & 0xff);
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
- UML_CALLH(block, *sh2->read8); // read8
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_CALLH(block, *sh2->read8); // read8
- UML_OR(block, I1, I0, opcode&0xff); // or r1, r0, #opcode&0xff
- UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
+ UML_OR(block, I1, I0, opcode&0xff); // or r1, r0, #opcode&0xff
+ UML_ADD(block, I0, R32(0), mem(&sh2->gbr)); // add r0, R0, gbr
SETEA(0);
- UML_CALLH(block, *sh2->write8); // write8
+ UML_CALLH(block, *sh2->write8); // write8
return TRUE;
}
@@ -3208,51 +3208,51 @@ static CPU_SET_INFO( sh2 )
switch (state)
{
/* --- the following bits of info are set as 64-bit signed integers --- */
- case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLIN: sh2_set_irq_line(sh2, SH2_INT_VBLIN, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLOUT: sh2_set_irq_line(sh2, SH2_INT_VBLOUT, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_HBLIN: sh2_set_irq_line(sh2, SH2_INT_HBLIN, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER0: sh2_set_irq_line(sh2, SH2_INT_TIMER0, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER1: sh2_set_irq_line(sh2, SH2_INT_TIMER1, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DSP: sh2_set_irq_line(sh2, SH2_INT_DSP, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SOUND: sh2_set_irq_line(sh2, SH2_INT_SOUND, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SMPC: sh2_set_irq_line(sh2, SH2_INT_SMPC, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_PAD: sh2_set_irq_line(sh2, SH2_INT_PAD, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA2: sh2_set_irq_line(sh2, SH2_INT_DMA2, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA1: sh2_set_irq_line(sh2, SH2_INT_DMA1, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA0: sh2_set_irq_line(sh2, SH2_INT_DMA0, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMAILL: sh2_set_irq_line(sh2, SH2_INT_DMAILL, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SPRITE: sh2_set_irq_line(sh2, SH2_INT_SPRITE, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_14: sh2_set_irq_line(sh2, SH2_INT_14, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_15: sh2_set_irq_line(sh2, SH2_INT_15, info->i); break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_ABUS: sh2_set_irq_line(sh2, SH2_INT_ABUS, info->i); break;
- case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI: sh2_set_irq_line(sh2, INPUT_LINE_NMI, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLIN: sh2_set_irq_line(sh2, SH2_INT_VBLIN, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLOUT: sh2_set_irq_line(sh2, SH2_INT_VBLOUT, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_HBLIN: sh2_set_irq_line(sh2, SH2_INT_HBLIN, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER0: sh2_set_irq_line(sh2, SH2_INT_TIMER0, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER1: sh2_set_irq_line(sh2, SH2_INT_TIMER1, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DSP: sh2_set_irq_line(sh2, SH2_INT_DSP, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SOUND: sh2_set_irq_line(sh2, SH2_INT_SOUND, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SMPC: sh2_set_irq_line(sh2, SH2_INT_SMPC, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_PAD: sh2_set_irq_line(sh2, SH2_INT_PAD, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA2: sh2_set_irq_line(sh2, SH2_INT_DMA2, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA1: sh2_set_irq_line(sh2, SH2_INT_DMA1, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA0: sh2_set_irq_line(sh2, SH2_INT_DMA0, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMAILL: sh2_set_irq_line(sh2, SH2_INT_DMAILL, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SPRITE: sh2_set_irq_line(sh2, SH2_INT_SPRITE, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_14: sh2_set_irq_line(sh2, SH2_INT_14, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_15: sh2_set_irq_line(sh2, SH2_INT_15, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_ABUS: sh2_set_irq_line(sh2, SH2_INT_ABUS, info->i); break;
+ case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI: sh2_set_irq_line(sh2, INPUT_LINE_NMI, info->i); break;
case CPUINFO_INT_REGISTER + SH2_PC:
- case CPUINFO_INT_PC: sh2->pc = info->i; sh2->delay = 0; break;
- case CPUINFO_INT_SP: sh2->r[15] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_PR: sh2->pr = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_SR: sh2->sr = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_GBR: sh2->gbr = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_VBR: sh2->vbr = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_MACH: sh2->mach = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_MACL: sh2->macl = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R0: sh2->r[ 0] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R1: sh2->r[ 1] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R2: sh2->r[ 2] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R3: sh2->r[ 3] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R4: sh2->r[ 4] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R5: sh2->r[ 5] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R6: sh2->r[ 6] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R7: sh2->r[ 7] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R8: sh2->r[ 8] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R9: sh2->r[ 9] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R10: sh2->r[10] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R11: sh2->r[11] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R12: sh2->r[12] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R13: sh2->r[13] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R14: sh2->r[14] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_R15: sh2->r[15] = info->i; break;
- case CPUINFO_INT_REGISTER + SH2_EA: sh2->ea = info->i; break;
+ case CPUINFO_INT_PC: sh2->pc = info->i; sh2->delay = 0; break;
+ case CPUINFO_INT_SP: sh2->r[15] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_PR: sh2->pr = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_SR: sh2->sr = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_GBR: sh2->gbr = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_VBR: sh2->vbr = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_MACH: sh2->mach = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_MACL: sh2->macl = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R0: sh2->r[ 0] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R1: sh2->r[ 1] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R2: sh2->r[ 2] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R3: sh2->r[ 3] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R4: sh2->r[ 4] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R5: sh2->r[ 5] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R6: sh2->r[ 6] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R7: sh2->r[ 7] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R8: sh2->r[ 8] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R9: sh2->r[ 9] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R10: sh2->r[10] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R11: sh2->r[11] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R12: sh2->r[12] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R13: sh2->r[13] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R14: sh2->r[14] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_R15: sh2->r[15] = info->i; break;
+ case CPUINFO_INT_REGISTER + SH2_EA: sh2->ea = info->i; break;
}
}
@@ -3267,96 +3267,96 @@ CPU_GET_INFO( sh2 )
switch (state)
{
/* --- the following bits of info are returned as 64-bit signed integers --- */
- case CPUINFO_INT_CONTEXT_SIZE: info->i = sizeof(sh2_state *); break;
- case CPUINFO_INT_INPUT_LINES: info->i = 16; break;
- case CPUINFO_INT_DEFAULT_IRQ_VECTOR: info->i = 0; break;
- case CPUINFO_INT_ENDIANNESS: info->i = ENDIANNESS_BIG; break;
- case CPUINFO_INT_CLOCK_MULTIPLIER: info->i = 1; break;
- case CPUINFO_INT_CLOCK_DIVIDER: info->i = 1; break;
- case CPUINFO_INT_MIN_INSTRUCTION_BYTES: info->i = 2; break;
- case CPUINFO_INT_MAX_INSTRUCTION_BYTES: info->i = 2; break;
- case CPUINFO_INT_MIN_CYCLES: info->i = 1; break;
- case CPUINFO_INT_MAX_CYCLES: info->i = 4; break;
-
- case CPUINFO_INT_DATABUS_WIDTH + AS_PROGRAM: info->i = 32; break;
- case CPUINFO_INT_ADDRBUS_WIDTH + AS_PROGRAM: info->i = 32; break;
- case CPUINFO_INT_ADDRBUS_SHIFT + AS_PROGRAM: info->i = 0; break;
- case CPUINFO_INT_DATABUS_WIDTH + AS_DATA: info->i = 0; break;
- case CPUINFO_INT_ADDRBUS_WIDTH + AS_DATA: info->i = 0; break;
- case CPUINFO_INT_ADDRBUS_SHIFT + AS_DATA: info->i = 0; break;
- case CPUINFO_INT_DATABUS_WIDTH + AS_IO: info->i = 0; break;
- case CPUINFO_INT_ADDRBUS_WIDTH + AS_IO: info->i = 0; break;
- case CPUINFO_INT_ADDRBUS_SHIFT + AS_IO: info->i = 0; break;
+ case CPUINFO_INT_CONTEXT_SIZE: info->i = sizeof(sh2_state *); break;
+ case CPUINFO_INT_INPUT_LINES: info->i = 16; break;
+ case CPUINFO_INT_DEFAULT_IRQ_VECTOR: info->i = 0; break;
+ case CPUINFO_INT_ENDIANNESS: info->i = ENDIANNESS_BIG; break;
+ case CPUINFO_INT_CLOCK_MULTIPLIER: info->i = 1; break;
+ case CPUINFO_INT_CLOCK_DIVIDER: info->i = 1; break;
+ case CPUINFO_INT_MIN_INSTRUCTION_BYTES: info->i = 2; break;
+ case CPUINFO_INT_MAX_INSTRUCTION_BYTES: info->i = 2; break;
+ case CPUINFO_INT_MIN_CYCLES: info->i = 1; break;
+ case CPUINFO_INT_MAX_CYCLES: info->i = 4; break;
+
+ case CPUINFO_INT_DATABUS_WIDTH + AS_PROGRAM: info->i = 32; break;
+ case CPUINFO_INT_ADDRBUS_WIDTH + AS_PROGRAM: info->i = 32; break;
+ case CPUINFO_INT_ADDRBUS_SHIFT + AS_PROGRAM: info->i = 0; break;
+ case CPUINFO_INT_DATABUS_WIDTH + AS_DATA: info->i = 0; break;
+ case CPUINFO_INT_ADDRBUS_WIDTH + AS_DATA: info->i = 0; break;
+ case CPUINFO_INT_ADDRBUS_SHIFT + AS_DATA: info->i = 0; break;
+ case CPUINFO_INT_DATABUS_WIDTH + AS_IO: info->i = 0; break;
+ case CPUINFO_INT_ADDRBUS_WIDTH + AS_IO: info->i = 0; break;
+ case CPUINFO_INT_ADDRBUS_SHIFT + AS_IO: info->i = 0; break;
// Internal maps
case CPUINFO_PTR_INTERNAL_MEMORY_MAP + AS_PROGRAM: info->internal_map32 = ADDRESS_MAP_NAME(sh2_internal_map); break;
- case CPUINFO_PTR_INTERNAL_MEMORY_MAP + AS_DATA: info->internal_map32 = NULL; break;
- case CPUINFO_PTR_INTERNAL_MEMORY_MAP + AS_IO: info->internal_map32 = NULL; break;
-
- case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLIN: info->i = sh2->irq_line_state[SH2_INT_VBLIN]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLOUT: info->i = sh2->irq_line_state[SH2_INT_VBLOUT]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_HBLIN: info->i = sh2->irq_line_state[SH2_INT_HBLIN]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER0: info->i = sh2->irq_line_state[SH2_INT_TIMER0]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER1: info->i = sh2->irq_line_state[SH2_INT_TIMER1]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DSP: info->i = sh2->irq_line_state[SH2_INT_DSP]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SOUND: info->i = sh2->irq_line_state[SH2_INT_SOUND]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SMPC: info->i = sh2->irq_line_state[SH2_INT_SMPC]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_PAD: info->i = sh2->irq_line_state[SH2_INT_PAD]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA2: info->i = sh2->irq_line_state[SH2_INT_DMA2]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA1: info->i = sh2->irq_line_state[SH2_INT_DMA1]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA0: info->i = sh2->irq_line_state[SH2_INT_DMA0]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_DMAILL: info->i = sh2->irq_line_state[SH2_INT_DMAILL]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_SPRITE: info->i = sh2->irq_line_state[SH2_INT_SPRITE]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_14: info->i = sh2->irq_line_state[SH2_INT_14]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_15: info->i = sh2->irq_line_state[SH2_INT_15]; break;
- case CPUINFO_INT_INPUT_STATE + SH2_INT_ABUS: info->i = sh2->irq_line_state[SH2_INT_ABUS]; break;
- case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI: info->i = sh2->nmi_line_state; break;
-
- case CPUINFO_INT_PREVIOUSPC: info->i = sh2->ppc; break;
+ case CPUINFO_PTR_INTERNAL_MEMORY_MAP + AS_DATA: info->internal_map32 = NULL; break;
+ case CPUINFO_PTR_INTERNAL_MEMORY_MAP + AS_IO: info->internal_map32 = NULL; break;
+
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLIN: info->i = sh2->irq_line_state[SH2_INT_VBLIN]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_VBLOUT: info->i = sh2->irq_line_state[SH2_INT_VBLOUT]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_HBLIN: info->i = sh2->irq_line_state[SH2_INT_HBLIN]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER0: info->i = sh2->irq_line_state[SH2_INT_TIMER0]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_TIMER1: info->i = sh2->irq_line_state[SH2_INT_TIMER1]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DSP: info->i = sh2->irq_line_state[SH2_INT_DSP]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SOUND: info->i = sh2->irq_line_state[SH2_INT_SOUND]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SMPC: info->i = sh2->irq_line_state[SH2_INT_SMPC]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_PAD: info->i = sh2->irq_line_state[SH2_INT_PAD]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA2: info->i = sh2->irq_line_state[SH2_INT_DMA2]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA1: info->i = sh2->irq_line_state[SH2_INT_DMA1]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMA0: info->i = sh2->irq_line_state[SH2_INT_DMA0]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_DMAILL: info->i = sh2->irq_line_state[SH2_INT_DMAILL]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_SPRITE: info->i = sh2->irq_line_state[SH2_INT_SPRITE]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_14: info->i = sh2->irq_line_state[SH2_INT_14]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_15: info->i = sh2->irq_line_state[SH2_INT_15]; break;
+ case CPUINFO_INT_INPUT_STATE + SH2_INT_ABUS: info->i = sh2->irq_line_state[SH2_INT_ABUS]; break;
+ case CPUINFO_INT_INPUT_STATE + INPUT_LINE_NMI: info->i = sh2->nmi_line_state; break;
+
+ case CPUINFO_INT_PREVIOUSPC: info->i = sh2->ppc; break;
case CPUINFO_INT_PC:
- case CPUINFO_INT_REGISTER + SH2_PC: info->i = (sh2->delay) ? (sh2->delay & AM) : (sh2->pc & AM); break;
- case CPUINFO_INT_SP: info->i = sh2->r[15]; break;
- case CPUINFO_INT_REGISTER + SH2_PR: info->i = sh2->pr; break;
- case CPUINFO_INT_REGISTER + SH2_SR: info->i = sh2->sr; break;
- case CPUINFO_INT_REGISTER + SH2_GBR: info->i = sh2->gbr; break;
- case CPUINFO_INT_REGISTER + SH2_VBR: info->i = sh2->vbr; break;
- case CPUINFO_INT_REGISTER + SH2_MACH: info->i = sh2->mach; break;
- case CPUINFO_INT_REGISTER + SH2_MACL: info->i = sh2->macl; break;
- case CPUINFO_INT_REGISTER + SH2_R0: info->i = sh2->r[ 0]; break;
- case CPUINFO_INT_REGISTER + SH2_R1: info->i = sh2->r[ 1]; break;
- case CPUINFO_INT_REGISTER + SH2_R2: info->i = sh2->r[ 2]; break;
- case CPUINFO_INT_REGISTER + SH2_R3: info->i = sh2->r[ 3]; break;
- case CPUINFO_INT_REGISTER + SH2_R4: info->i = sh2->r[ 4]; break;
- case CPUINFO_INT_REGISTER + SH2_R5: info->i = sh2->r[ 5]; break;
- case CPUINFO_INT_REGISTER + SH2_R6: info->i = sh2->r[ 6]; break;
- case CPUINFO_INT_REGISTER + SH2_R7: info->i = sh2->r[ 7]; break;
- case CPUINFO_INT_REGISTER + SH2_R8: info->i = sh2->r[ 8]; break;
- case CPUINFO_INT_REGISTER + SH2_R9: info->i = sh2->r[ 9]; break;
- case CPUINFO_INT_REGISTER + SH2_R10: info->i = sh2->r[10]; break;
- case CPUINFO_INT_REGISTER + SH2_R11: info->i = sh2->r[11]; break;
- case CPUINFO_INT_REGISTER + SH2_R12: info->i = sh2->r[12]; break;
- case CPUINFO_INT_REGISTER + SH2_R13: info->i = sh2->r[13]; break;
- case CPUINFO_INT_REGISTER + SH2_R14: info->i = sh2->r[14]; break;
- case CPUINFO_INT_REGISTER + SH2_R15: info->i = sh2->r[15]; break;
- case CPUINFO_INT_REGISTER + SH2_EA: info->i = sh2->ea; break;
+ case CPUINFO_INT_REGISTER + SH2_PC: info->i = (sh2->delay) ? (sh2->delay & AM) : (sh2->pc & AM); break;
+ case CPUINFO_INT_SP: info->i = sh2->r[15]; break;
+ case CPUINFO_INT_REGISTER + SH2_PR: info->i = sh2->pr; break;
+ case CPUINFO_INT_REGISTER + SH2_SR: info->i = sh2->sr; break;
+ case CPUINFO_INT_REGISTER + SH2_GBR: info->i = sh2->gbr; break;
+ case CPUINFO_INT_REGISTER + SH2_VBR: info->i = sh2->vbr; break;
+ case CPUINFO_INT_REGISTER + SH2_MACH: info->i = sh2->mach; break;
+ case CPUINFO_INT_REGISTER + SH2_MACL: info->i = sh2->macl; break;
+ case CPUINFO_INT_REGISTER + SH2_R0: info->i = sh2->r[ 0]; break;
+ case CPUINFO_INT_REGISTER + SH2_R1: info->i = sh2->r[ 1]; break;
+ case CPUINFO_INT_REGISTER + SH2_R2: info->i = sh2->r[ 2]; break;
+ case CPUINFO_INT_REGISTER + SH2_R3: info->i = sh2->r[ 3]; break;
+ case CPUINFO_INT_REGISTER + SH2_R4: info->i = sh2->r[ 4]; break;
+ case CPUINFO_INT_REGISTER + SH2_R5: info->i = sh2->r[ 5]; break;
+ case CPUINFO_INT_REGISTER + SH2_R6: info->i = sh2->r[ 6]; break;
+ case CPUINFO_INT_REGISTER + SH2_R7: info->i = sh2->r[ 7]; break;
+ case CPUINFO_INT_REGISTER + SH2_R8: info->i = sh2->r[ 8]; break;
+ case CPUINFO_INT_REGISTER + SH2_R9: info->i = sh2->r[ 9]; break;
+ case CPUINFO_INT_REGISTER + SH2_R10: info->i = sh2->r[10]; break;
+ case CPUINFO_INT_REGISTER + SH2_R11: info->i = sh2->r[11]; break;
+ case CPUINFO_INT_REGISTER + SH2_R12: info->i = sh2->r[12]; break;
+ case CPUINFO_INT_REGISTER + SH2_R13: info->i = sh2->r[13]; break;
+ case CPUINFO_INT_REGISTER + SH2_R14: info->i = sh2->r[14]; break;
+ case CPUINFO_INT_REGISTER + SH2_R15: info->i = sh2->r[15]; break;
+	case CPUINFO_INT_REGISTER + SH2_EA:		info->i = sh2->ea;				break;

/* --- the following bits of info are returned as pointers to data or functions --- */
- case CPUINFO_FCT_SET_INFO: info->setinfo = CPU_SET_INFO_NAME(sh2); break;
- case CPUINFO_FCT_INIT: info->init = CPU_INIT_NAME(sh2); break;
- case CPUINFO_FCT_RESET: info->reset = CPU_RESET_NAME(sh2); break;
- case CPUINFO_FCT_EXIT: info->exit = CPU_EXIT_NAME(sh2); break;
- case CPUINFO_FCT_EXECUTE: info->execute = CPU_EXECUTE_NAME(sh2); break;
- case CPUINFO_FCT_BURN: info->burn = NULL; break;
- case CPUINFO_FCT_DISASSEMBLE: info->disassemble = CPU_DISASSEMBLE_NAME(sh2); break;
- case CPUINFO_PTR_INSTRUCTION_COUNTER: info->icount = &sh2->icount; break;
+ case CPUINFO_FCT_SET_INFO: info->setinfo = CPU_SET_INFO_NAME(sh2); break;
+ case CPUINFO_FCT_INIT: info->init = CPU_INIT_NAME(sh2); break;
+ case CPUINFO_FCT_RESET: info->reset = CPU_RESET_NAME(sh2); break;
+ case CPUINFO_FCT_EXIT: info->exit = CPU_EXIT_NAME(sh2); break;
+ case CPUINFO_FCT_EXECUTE: info->execute = CPU_EXECUTE_NAME(sh2); break;
+ case CPUINFO_FCT_BURN: info->burn = NULL; break;
+ case CPUINFO_FCT_DISASSEMBLE: info->disassemble = CPU_DISASSEMBLE_NAME(sh2); break;
+	case CPUINFO_PTR_INSTRUCTION_COUNTER:		info->icount = &sh2->icount;			break;

/* --- the following bits of info are returned as NULL-terminated strings --- */
- case CPUINFO_STR_NAME: strcpy(info->s, "SH-2"); break;
- case CPUINFO_STR_FAMILY: strcpy(info->s, "Hitachi SuperH RISC"); break;
- case CPUINFO_STR_VERSION: strcpy(info->s, "2.0"); break;
- case CPUINFO_STR_SOURCE_FILE: strcpy(info->s, __FILE__); break;
- case CPUINFO_STR_CREDITS: strcpy(info->s, "Copyright Nicola Salmoria and the MAME team, all rights reserved."); break;
+ case CPUINFO_STR_NAME: strcpy(info->s, "SH-2"); break;
+ case CPUINFO_STR_FAMILY: strcpy(info->s, "Hitachi SuperH RISC"); break;
+ case CPUINFO_STR_VERSION: strcpy(info->s, "2.0"); break;
+ case CPUINFO_STR_SOURCE_FILE: strcpy(info->s, __FILE__); break;
+	case CPUINFO_STR_CREDITS:			strcpy(info->s, "Copyright Nicola Salmoria and the MAME team, all rights reserved."); break;

case CPUINFO_STR_FLAGS:
sprintf(info->s, "%c%c%d%c%c",
@@ -3367,30 +3367,30 @@ CPU_GET_INFO( sh2 )
sh2->sr & T ? 'T':'.');
 			break;

- case CPUINFO_STR_REGISTER + SH2_PC: sprintf(info->s, "PC :%08X", sh2->pc); break;
- case CPUINFO_STR_REGISTER + SH2_SR: sprintf(info->s, "SR :%08X", sh2->sr); break;
- case CPUINFO_STR_REGISTER + SH2_PR: sprintf(info->s, "PR :%08X", sh2->pr); break;
- case CPUINFO_STR_REGISTER + SH2_GBR: sprintf(info->s, "GBR :%08X", sh2->gbr); break;
- case CPUINFO_STR_REGISTER + SH2_VBR: sprintf(info->s, "VBR :%08X", sh2->vbr); break;
- case CPUINFO_STR_REGISTER + SH2_MACH: sprintf(info->s, "MACH:%08X", sh2->mach); break;
- case CPUINFO_STR_REGISTER + SH2_MACL: sprintf(info->s, "MACL:%08X", sh2->macl); break;
- case CPUINFO_STR_REGISTER + SH2_R0: sprintf(info->s, "R0 :%08X", sh2->r[ 0]); break;
- case CPUINFO_STR_REGISTER + SH2_R1: sprintf(info->s, "R1 :%08X", sh2->r[ 1]); break;
- case CPUINFO_STR_REGISTER + SH2_R2: sprintf(info->s, "R2 :%08X", sh2->r[ 2]); break;
- case CPUINFO_STR_REGISTER + SH2_R3: sprintf(info->s, "R3 :%08X", sh2->r[ 3]); break;
- case CPUINFO_STR_REGISTER + SH2_R4: sprintf(info->s, "R4 :%08X", sh2->r[ 4]); break;
- case CPUINFO_STR_REGISTER + SH2_R5: sprintf(info->s, "R5 :%08X", sh2->r[ 5]); break;
- case CPUINFO_STR_REGISTER + SH2_R6: sprintf(info->s, "R6 :%08X", sh2->r[ 6]); break;
- case CPUINFO_STR_REGISTER + SH2_R7: sprintf(info->s, "R7 :%08X", sh2->r[ 7]); break;
- case CPUINFO_STR_REGISTER + SH2_R8: sprintf(info->s, "R8 :%08X", sh2->r[ 8]); break;
- case CPUINFO_STR_REGISTER + SH2_R9: sprintf(info->s, "R9 :%08X", sh2->r[ 9]); break;
- case CPUINFO_STR_REGISTER + SH2_R10: sprintf(info->s, "R10 :%08X", sh2->r[10]); break;
- case CPUINFO_STR_REGISTER + SH2_R11: sprintf(info->s, "R11 :%08X", sh2->r[11]); break;
- case CPUINFO_STR_REGISTER + SH2_R12: sprintf(info->s, "R12 :%08X", sh2->r[12]); break;
- case CPUINFO_STR_REGISTER + SH2_R13: sprintf(info->s, "R13 :%08X", sh2->r[13]); break;
- case CPUINFO_STR_REGISTER + SH2_R14: sprintf(info->s, "R14 :%08X", sh2->r[14]); break;
- case CPUINFO_STR_REGISTER + SH2_R15: sprintf(info->s, "R15 :%08X", sh2->r[15]); break;
- case CPUINFO_STR_REGISTER + SH2_EA: sprintf(info->s, "EA :%08X", sh2->ea); break;
+ case CPUINFO_STR_REGISTER + SH2_PC: sprintf(info->s, "PC :%08X", sh2->pc); break;
+ case CPUINFO_STR_REGISTER + SH2_SR: sprintf(info->s, "SR :%08X", sh2->sr); break;
+ case CPUINFO_STR_REGISTER + SH2_PR: sprintf(info->s, "PR :%08X", sh2->pr); break;
+ case CPUINFO_STR_REGISTER + SH2_GBR: sprintf(info->s, "GBR :%08X", sh2->gbr); break;
+ case CPUINFO_STR_REGISTER + SH2_VBR: sprintf(info->s, "VBR :%08X", sh2->vbr); break;
+ case CPUINFO_STR_REGISTER + SH2_MACH: sprintf(info->s, "MACH:%08X", sh2->mach); break;
+ case CPUINFO_STR_REGISTER + SH2_MACL: sprintf(info->s, "MACL:%08X", sh2->macl); break;
+ case CPUINFO_STR_REGISTER + SH2_R0: sprintf(info->s, "R0 :%08X", sh2->r[ 0]); break;
+ case CPUINFO_STR_REGISTER + SH2_R1: sprintf(info->s, "R1 :%08X", sh2->r[ 1]); break;
+ case CPUINFO_STR_REGISTER + SH2_R2: sprintf(info->s, "R2 :%08X", sh2->r[ 2]); break;
+ case CPUINFO_STR_REGISTER + SH2_R3: sprintf(info->s, "R3 :%08X", sh2->r[ 3]); break;
+ case CPUINFO_STR_REGISTER + SH2_R4: sprintf(info->s, "R4 :%08X", sh2->r[ 4]); break;
+ case CPUINFO_STR_REGISTER + SH2_R5: sprintf(info->s, "R5 :%08X", sh2->r[ 5]); break;
+ case CPUINFO_STR_REGISTER + SH2_R6: sprintf(info->s, "R6 :%08X", sh2->r[ 6]); break;
+ case CPUINFO_STR_REGISTER + SH2_R7: sprintf(info->s, "R7 :%08X", sh2->r[ 7]); break;
+ case CPUINFO_STR_REGISTER + SH2_R8: sprintf(info->s, "R8 :%08X", sh2->r[ 8]); break;
+ case CPUINFO_STR_REGISTER + SH2_R9: sprintf(info->s, "R9 :%08X", sh2->r[ 9]); break;
+ case CPUINFO_STR_REGISTER + SH2_R10: sprintf(info->s, "R10 :%08X", sh2->r[10]); break;
+ case CPUINFO_STR_REGISTER + SH2_R11: sprintf(info->s, "R11 :%08X", sh2->r[11]); break;
+ case CPUINFO_STR_REGISTER + SH2_R12: sprintf(info->s, "R12 :%08X", sh2->r[12]); break;
+ case CPUINFO_STR_REGISTER + SH2_R13: sprintf(info->s, "R13 :%08X", sh2->r[13]); break;
+ case CPUINFO_STR_REGISTER + SH2_R14: sprintf(info->s, "R14 :%08X", sh2->r[14]); break;
+ case CPUINFO_STR_REGISTER + SH2_R15: sprintf(info->s, "R15 :%08X", sh2->r[15]); break;
+ case CPUINFO_STR_REGISTER + SH2_EA: sprintf(info->s, "EA :%08X", sh2->ea); break;
}
 }

@@ -3404,16 +3404,16 @@ CPU_GET_INFO( sh1 )
switch (state)
{
/* --- the following bits of info are returned as pointers to data or functions --- */
- case CPUINFO_FCT_RESET: info->reset = CPU_RESET_NAME(sh1); break;
+	case CPUINFO_FCT_RESET:				info->reset = CPU_RESET_NAME(sh1);		break;

/* --- the following bits of info are returned as NULL-terminated strings --- */
- case CPUINFO_STR_NAME: strcpy(info->s, "SH-1"); break;
+	case CPUINFO_STR_NAME:				strcpy(info->s, "SH-1");			break;

- default: CPU_GET_INFO_CALL(sh2); break;
+ default: CPU_GET_INFO_CALL(sh2); break;
}
 }

DEFINE_LEGACY_CPU_DEVICE(SH1, sh1);
 DEFINE_LEGACY_CPU_DEVICE(SH2, sh2);

-#endif // USE_SH2DRC
+#endif // USE_SH2DRC
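
The hunks above follow the legacy get_info dispatch pattern used throughout this file: a single switch on a state constant fills one member of a shared info structure (info->i for integers, info->s for strings, the various function pointers for entry points), and the SH-1 core overrides only the states it needs before deferring to the SH-2 handler from its default branch. The sketch below is a minimal, self-contained illustration of that pattern only, not the real interface: the cpu_info type and the INFO_* constants are hypothetical stand-ins, not the actual cpuintrf definitions used by this file.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the legacy cpuinfo query interface (illustration only). */
enum { INFO_INT_PC = 1, INFO_STR_NAME = 100, INFO_STR_FAMILY = 101 };

typedef struct { long long i; char s[64]; } cpu_info;

/* Base core: answers every state it knows about. */
static void sh2_like_get_info(int state, cpu_info *info)
{
	switch (state)
	{
		case INFO_INT_PC:     info->i = 0x06004000; break;          /* dummy PC value */
		case INFO_STR_NAME:   strcpy(info->s, "SH-2 (example)");   break;
		case INFO_STR_FAMILY: strcpy(info->s, "SuperH (example)"); break;
	}
}

/* Derived core: overrides the name, defers everything else to the base handler. */
static void sh1_like_get_info(int state, cpu_info *info)
{
	switch (state)
	{
		case INFO_STR_NAME: strcpy(info->s, "SH-1 (example)"); break;
		default:            sh2_like_get_info(state, info);   break;
	}
}

int main(void)
{
	cpu_info info = { 0, "" };

	sh1_like_get_info(INFO_STR_NAME, &info);
	printf("name:   %s\n", info.s);

	sh1_like_get_info(INFO_STR_FAMILY, &info);   /* falls through to the base core */
	printf("family: %s\n", info.s);

	sh1_like_get_info(INFO_INT_PC, &info);
	printf("pc:     %08llX\n", (unsigned long long)info.i);
	return 0;
}

Because unhandled states fall through to the base handler, a derived core like SH-1 only spells out the few answers that differ; every other query is answered by the SH-2 implementation unchanged.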