Diffstat:
-rw-r--r--  .hgignore  10
-rw-r--r--  68kinst.c  1114
-rw-r--r--  68kinst.h  113
-rw-r--r--  Makefile  57
-rw-r--r--  backend.c  21
-rw-r--r--  backend.h  70
-rw-r--r--  backend_x86.c  219
-rw-r--r--  blastem.c  373
-rw-r--r--  blastem.h  6
-rwxr-xr-x  comparetests.py  52
-rw-r--r--  debug.c  69
-rw-r--r--  debug.h  2
-rw-r--r--  default.cfg  7
-rw-r--r--  dis.c  203
-rw-r--r--  gdb_remote.c  2
-rw-r--r--  gen.c  15
-rw-r--r--  gen.h  33
-rw-r--r--  gen_arm.c  21
-rw-r--r--  gen_arm.h  8
-rw-r--r--  gen_test_hv.s  68631
-rw-r--r--  gen_x86.c  1102
-rw-r--r--  gen_x86.h  278
-rwxr-xr-x  gentests.py  8
-rw-r--r--  gst.c  10
-rw-r--r--  io.c  466
-rw-r--r--  io.h  51
-rw-r--r--  m68k_core.c  924
-rw-r--r--  m68k_core.h (renamed from m68k_to_x86.h)  17
-rw-r--r--  m68k_core_x86.c  2519
-rw-r--r--  m68k_internal.h  116
-rw-r--r--  m68k_to_x86.c  5028
-rw-r--r--  render_sdl.c  12
-rw-r--r--  runtime.S  68
-rw-r--r--  runtime_32.S  57
-rw-r--r--  tern.c  12
-rw-r--r--  tern.h  3
-rw-r--r--  test_x86.c  4
-rw-r--r--  testcases.txt  169
-rw-r--r--  trans.c  18
-rw-r--r--  transz80.c  4
-rw-r--r--  vdp.c  915
-rw-r--r--  vdp.h  9
-rw-r--r--  vos_prog_info.c  100
-rw-r--r--  vos_program_module.c  208
-rw-r--r--  vos_program_module.h  134
-rw-r--r--  ym2612.c  2
-rw-r--r--  z80_to_x86.c  2353
-rw-r--r--  z80_to_x86.h  56
-rw-r--r--  z80inst.c  38
-rwxr-xr-x  zcompare.py  58
-rw-r--r--  ztestgen.c  83
-rw-r--r--  ztestrun.c  107
52 files changed, 10117 insertions, 7838 deletions
diff --git a/.hgignore b/.hgignore
index 5fd2fe1..b27d1e8 100644
--- a/.hgignore
+++ b/.hgignore
@@ -6,14 +6,22 @@ syntax: glob
*.jpg
*.pdf
*.tar.gz
+*.list
*~
starscream/*
+gxz80/*
+musashi/*
vdpreverse/*
nemesis/*
html/*
+generated_tests/*
+ztests/*
*.o
+*.list
blastem
dis
stateview
trans
-
+zdis
+ztestrun
+address.log
diff --git a/68kinst.c b/68kinst.c
index b9e25aa..f53aabe 100644
--- a/68kinst.c
+++ b/68kinst.c
@@ -19,7 +19,7 @@ uint32_t sign_extend8(uint32_t val)
uint16_t *m68k_decode_op_ex(uint16_t *cur, uint8_t mode, uint8_t reg, uint8_t size, m68k_op_info *dst)
{
- uint16_t ext;
+ uint16_t ext, tmp;
dst->addr_mode = mode;
switch(mode)
{
@@ -36,15 +36,95 @@ uint16_t *m68k_decode_op_ex(uint16_t *cur, uint8_t mode, uint8_t reg, uint8_t si
dst->params.regs.displacement = sign_extend16(ext);
break;
case MODE_AREG_INDEX_MEM:
- #ifdef M68020
- //TODO: implement me for M68020+ support
- #else
+ dst->params.regs.pri = reg;
+ ext = *(++cur);
+ dst->params.regs.sec = ext >> 11;//includes areg/dreg bit, reg num and word/long bit
+#ifdef M68020
+ dst->params.regs.scale = ext >> 9 & 3;
+ if (ext & 0x100)
+ {
+ dst->params.regs.disp_sizes = ext >> 4 & 3;
+ switch (dst->params.regs.disp_sizes)
+ {
+ case 0:
+ //reserved
+ return NULL;
+ case 1:
+ dst->params.regs.displacement = 0;
+ break;
+ case 2:
+ dst->params.regs.displacement = sign_extend16(*(cur++));
+ break;
+ case 3:
+ tmp = *(cur++);
+ dst->params.regs.displacement = tmp << 16 | *(cur++);
+ break;
+ }
+ if (ext & 0x3)
+ {
+ //memory indirect
+ switch (ext & 0xC4)
+ {
+ case 0x00:
+ dst->addr_mode = MODE_AREG_PREINDEX;
+ break;
+ case 0x04:
+ dst->addr_mode = MODE_AREG_POSTINDEX;
+ break;
+ case 0x40:
+ dst->addr_mode = MODE_AREG_MEM_INDIRECT;
+ break;
+ case 0x80:
+ dst->addr_mode = MODE_PREINDEX;
+ break;
+ case 0x84:
+ dst->addr_mode = MODE_POSTINDEX;
+ break;
+ case 0xC0:
+ dst->addr_mode = MODE_MEM_INDIRECT;
+ break;
+ }
+ dst->params.regs.disp_sizes |= ext << 4 & 0x30;
+ switch (ext & 0x3)
+ {
+ case 0:
+ //reserved
+ return NULL;
+ case 1:
+ dst->params.regs.outer_disp = 0;
+ break;
+ case 2:
+ dst->params.regs.outer_disp = sign_extend16(*(cur++));
+ break;
+ case 3:
+ tmp = *(cur++);
+ dst->params.regs.outer_disp = tmp << 16 | *(cur++);
+ break;
+ }
+ } else {
+ switch (ext >> 6 & 3)
+ {
+ case 0:
+ dst->addr_mode = MODE_AREG_INDEX_BASE_DISP;
+ break;
+ case 1:
+ dst->addr_mode = MODE_AREG_BASE_DISP;
+ break;
+ case 2:
+ dst->addr_mode = MODE_INDEX_BASE_DISP;
+ break;
+ case 3:
+ dst->addr_mode = MODE_BASE_DISP;
+ break;
+ }
+ }
+ } else {
+#endif
dst->addr_mode = MODE_AREG_INDEX_DISP8;
- dst->params.regs.pri = reg;
- ext = *(++cur);
- dst->params.regs.sec = ext >> 11;//includes areg/dreg bit, reg num and word/long bit
dst->params.regs.displacement = sign_extend8(ext&0xFF);
- #endif
+#ifdef M68020
+ }
+#endif
break;
case MODE_PC_INDIRECT_ABS_IMMED:
switch(reg)
@@ -60,13 +140,93 @@ uint16_t *m68k_decode_op_ex(uint16_t *cur, uint8_t mode, uint8_t reg, uint8_t si
dst->params.immed = ext << 16 | *(++cur);
break;
case 3:
-#ifdef M68020
- //TODO: Implement me for M68020+ support;
-#else
- dst->addr_mode = MODE_PC_INDEX_DISP8;
ext = *(++cur);
dst->params.regs.sec = ext >> 11;//includes areg/dreg bit, reg num and word/long bit
- dst->params.regs.displacement = sign_extend8(ext&0xFF);
+#ifdef M68020
+ dst->params.regs.scale = ext >> 9 & 3;
+ if (ext & 0x100)
+ {
+ dst->params.regs.disp_sizes = ext >> 4 & 3;
+ switch (dst->params.regs.disp_sizes)
+ {
+ case 0:
+ //reserved
+ return NULL;
+ case 1:
+ dst->params.regs.displacement = 0;
+ break;
+ case 2:
+ dst->params.regs.displacement = sign_extend16(*(cur++));
+ break;
+ case 3:
+ tmp = *(cur++);
+ dst->params.regs.displacement = tmp << 16 | *(cur++);
+ break;
+ }
+ if (ext & 0x3)
+ {
+ //memory indirect
+ switch (ext & 0xC4)
+ {
+ case 0x00:
+ dst->addr_mode = MODE_PC_PREINDEX;
+ break;
+ case 0x04:
+ dst->addr_mode = MODE_PC_POSTINDEX;
+ break;
+ case 0x40:
+ dst->addr_mode = MODE_PC_MEM_INDIRECT;
+ break;
+ case 0x80:
+ dst->addr_mode = MODE_ZPC_PREINDEX;
+ break;
+ case 0x84:
+ dst->addr_mode = MODE_ZPC_POSTINDEX;
+ break;
+ case 0xC0:
+ dst->addr_mode = MODE_ZPC_MEM_INDIRECT;
+ break;
+ }
+ dst->params.regs.disp_sizes |= ext << 4 & 0x30;
+ switch (ext & 0x3)
+ {
+ case 0:
+ //reserved
+ return NULL;
+ case 1:
+ dst->params.regs.outer_disp = 0;
+ break;
+ case 2:
+ dst->params.regs.outer_disp = sign_extend16(*(cur++));
+ break;
+ case 3:
+ tmp = *(cur++);
+ dst->params.regs.outer_disp = tmp << 16 | *(cur++);
+ break;
+ }
+ } else {
+ switch (ext >> 6 & 3)
+ {
+ case 0:
+ dst->addr_mode = MODE_PC_INDEX_BASE_DISP;
+ break;
+ case 1:
+ dst->addr_mode = MODE_PC_BASE_DISP;
+ break;
+ case 2:
+ dst->addr_mode = MODE_ZPC_INDEX_BASE_DISP;
+ break;
+ case 3:
+ dst->addr_mode = MODE_ZPC_BASE_DISP;
+ break;
+ }
+ }
+ } else {
+#endif
+ dst->addr_mode = MODE_PC_INDEX_DISP8;
+ dst->params.regs.displacement = sign_extend8(ext&0xFF);
+#ifdef M68020
+ }
#endif
break;
case 2:
@@ -172,7 +332,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
if (decoded->dst.addr_mode == MODE_REG) {
decoded->extra.size = OPSIZE_LONG;
@@ -202,7 +362,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
if (decoded->dst.addr_mode == MODE_REG) {
decoded->extra.size = OPSIZE_LONG;
@@ -248,7 +408,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -287,7 +447,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -314,7 +474,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 3:
@@ -340,7 +500,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 4:
@@ -365,7 +525,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_BYTE, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 5:
@@ -403,7 +563,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -427,15 +587,29 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
decoded->src.params.immed = (immed << 16) | *(++istream);
break;
}
- istream = m68k_decode_op_ex(istream, opmode, reg, size, &(decoded->dst));
+ istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 7:
-
-
+#ifdef M68010
+ decoded->op = M68K_MOVES;
+ decoded->extra.size = *istream >> 6 & 0x3;
+ immed = *(++istream);
+ reg = immed >> 12 & 0x7;
+ opmode = immed & 0x8000 ? MODE_AREG : MODE_REG;
+ if (immed & 0x800) {
+ decoded->src.addr_mode = opmode;
+ decoded->src.params.regs.pri = reg;
+ m68k_decode_op_ex(istream, *start >> 3 & 0x7, *start & 0x7, decoded->extra.size, &(decoded->dst));
+ } else {
+ m68k_decode_op_ex(istream, *start >> 3 & 0x7, *start & 0x7, decoded->extra.size, &(decoded->src));
+ decoded->dst.addr_mode = opmode;
+ decoded->dst.params.regs.pri = reg;
+ }
+#endif
break;
}
}
@@ -450,12 +624,12 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, &(decoded->dst));
if (!istream || decoded->dst.addr_mode == MODE_IMMEDIATE) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case MISC:
@@ -468,7 +642,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
if (*istream & 0x100) {
@@ -489,7 +663,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
opmode = (*istream >> 3) & 0x7;
@@ -504,7 +678,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
if (decoded->src.addr_mode == MODE_PC_DISPLACE || decoded->src.addr_mode == MODE_PC_INDEX_DISP8) {
//adjust displacement to account for extra instruction word
@@ -516,7 +690,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
} else {
@@ -536,7 +710,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream= m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 1:
@@ -546,7 +720,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
decoded->op = M68K_MOVE_FROM_CCR;
size = OPSIZE_WORD;
#else
- return istream+1;
+ break;
#endif
} else {
decoded->op = M68K_CLR;
@@ -555,7 +729,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream= m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 2:
@@ -566,14 +740,14 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream= m68k_decode_op(istream, size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->op = M68K_NEG;
istream= m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
decoded->extra.size = size;
@@ -586,14 +760,14 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream= m68k_decode_op(istream, size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->op = M68K_NOT;
istream= m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
decoded->extra.size = size;
@@ -648,7 +822,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_BYTE, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else if((*istream & 0x1C0) == 0x40) {
decoded->op = M68K_PEA;
@@ -656,7 +830,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_LONG, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
}
@@ -678,7 +852,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
}
@@ -702,7 +876,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_UNSIZED, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
//it would appear bit 6 needs to be set for it to be a valid instruction here
@@ -783,6 +957,33 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
case 7:
//MOVEC
#ifdef M68010
+ decoded->op = M68K_MOVEC;
+ immed = *(++istream);
+ reg = immed >> 12 & 0x7;
+ opmode = immed & 0x8000 ? MODE_AREG : MODE_REG;
+ immed &= 0xFFF;
+ if (immed & 0x800) {
+ if (immed > MAX_HIGH_CR) {
+ decoded->op = M68K_INVALID;
+ break;
+ } else {
+ immed = immed - 0x800 + CR_USP;
+ }
+ } else {
+ if (immed > MAX_LOW_CR) {
+ decoded->op = M68K_INVALID;
+ break;
+ }
+ }
+ if (*start & 1) {
+ decoded->src.addr_mode = opmode;
+ decoded->src.params.regs.pri = reg;
+ decoded->dst.params.immed = immed;
+ } else {
+ decoded->dst.addr_mode = opmode;
+ decoded->dst.params.regs.pri = reg;
+ decoded->src.params.immed = immed;
+ }
#endif
break;
}
@@ -816,7 +1017,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_BYTE, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
} else {
@@ -837,7 +1038,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -865,7 +1066,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
case MOVEQ:
if (*istream & 0x100) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
decoded->op = M68K_MOVE;
decoded->variant = VAR_QUICK;
@@ -891,11 +1092,12 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_WORD, &(decoded->src));
if (!istream || decoded->src.addr_mode == MODE_AREG) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
case 4:
decoded->op = M68K_SBCD;
+ decoded->extra.size = OPSIZE_BYTE;
decoded->dst.addr_mode = decoded->src.addr_mode = *istream & 0x8 ? MODE_AREG_PREDEC : MODE_REG;
decoded->src.params.regs.pri = *istream & 0x7;
decoded->dst.params.regs.pri = (*istream >> 9) & 0x7;
@@ -916,7 +1118,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_WORD, &(decoded->src));
if (!istream || decoded->src.addr_mode == MODE_AREG) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
break;
}
@@ -929,7 +1131,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->dst.addr_mode = MODE_REG;
@@ -937,7 +1139,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
}
@@ -956,7 +1158,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_LONG, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->extra.size = size;
@@ -965,7 +1167,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
} else {
@@ -993,7 +1195,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -1011,14 +1213,14 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
reg = m68k_reg_quick_field(*istream);
istream = m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
decoded->extra.size = size;
if (decoded->dst.addr_mode == MODE_AREG) {
@@ -1046,7 +1248,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -1069,7 +1271,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_WORD, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else if(!(*istream & 0xF0)) {
decoded->op = M68K_ABCD;
@@ -1100,7 +1302,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
} else {
@@ -1112,7 +1314,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_WORD, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->op = M68K_AND;
@@ -1122,7 +1324,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
}
@@ -1141,7 +1343,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_LONG, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else {
decoded->extra.size = size;
@@ -1150,7 +1352,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, size, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
} else {
@@ -1178,7 +1380,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, decoded->extra.size, &(decoded->src));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
}
break;
@@ -1215,7 +1417,7 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
istream = m68k_decode_op(istream, OPSIZE_WORD, &(decoded->dst));
if (!istream) {
decoded->op = M68K_INVALID;
- return start+1;
+ break;
}
} else if((*istream & 0xC0) != 0xC0) {
switch(((*istream >> 2) & 0x6) | ((*istream >> 8) & 1))
@@ -1263,6 +1465,56 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
} else {
#ifdef M68020
//TODO: Implement bitfield instructions for M68020+ support
+ switch (*istream >> 8 & 7)
+ {
+ case 0:
+ decoded->op = M68K_BFTST; //<ea>
+ break;
+ case 1:
+ decoded->op = M68K_BFEXTU; //<ea>, Dn
+ break;
+ case 2:
+ decoded->op = M68K_BFCHG; //<ea>
+ break;
+ case 3:
+ decoded->op = M68K_BFEXTS; //<ea>, Dn
+ break;
+ case 4:
+ decoded->op = M68K_BFCLR; //<ea>
+ break;
+ case 5:
+ decoded->op = M68K_BFFFO; //<ea>, Dn
+ break;
+ case 6:
+ decoded->op = M68K_BFSET; //<ea>
+ break;
+ case 7:
+ decoded->op = M68K_BFINS; //Dn, <ea>
+ break;
+ }
+ opmode = *istream >> 3 & 0x7;
+ reg = *istream & 0x7;
+ m68k_op_info *ea, *other;
+ if (decoded->op == M68K_BFEXTU || decoded->op == M68K_BFEXTS || decoded->op == M68K_BFFFO)
+ {
+ ea = &(decoded->src);
+ other = &(decoded->dst);
+ } else {
+ ea = &(decoded->dst);
+ other = &(decoded->dst);
+ }
+ if (*istream & 0x100)
+ {
+ immed = *(istream++);
+ other->addr_mode = MODE_REG;
+ other->params.regs.pri = immed >> 12 & 0x7;
+ } else {
+ immed = *(istream++);
+ }
+ decoded->extra.size = OPSIZE_UNSIZED;
+ istream = m68k_decode_op_ex(istream, opmode, reg, decoded->extra.size, ea);
+ ea->addr_mode |= M68K_FLAG_BITFIELD;
+ ea->bitfield = immed & 0xFFF;
#endif
}
break;
@@ -1270,6 +1522,10 @@ uint16_t * m68k_decode(uint16_t * istream, m68kinst * decoded, uint32_t address)
//TODO: Implement me
break;
}
+ if (decoded->op == M68K_INVALID) {
+ decoded->src.params.immed = *start;
+ return start + 1;
+ }
return istream+1;
}
@@ -1416,7 +1672,43 @@ char * mnemonics[] = {
"trapv",
"tst",
"unlk",
- "invalid"
+ "invalid",
+#ifdef M68010
+ "bkpt",
+ "move", //from ccr
+ "movec",
+ "moves",
+ "rtd",
+#endif
+#ifdef M68020
+ "bfchg",
+ "bfclr",
+ "bfexts",
+ "bfextu",
+ "bfffo",
+ "bfins",
+ "bfset",
+ "bftst",
+ "callm",
+ "cas",
+ "cas2",
+ "chk2",
+ "cmp2",
+ "cpbcc",
+ "cpdbcc",
+ "cpgen",
+ "cprestore",
+ "cpsave",
+ "cpscc",
+ "cptrapcc",
+ "divsl",
+ "divul",
+ "extb",
+ "pack",
+ "rtm",
+ "trapcc",
+ "unpk"
+#endif
};
char * cond_mnem[] = {
@@ -1437,55 +1729,651 @@ char * cond_mnem[] = {
"gt",
"le"
};
+#ifdef M68010
+char * cr_mnem[] = {
+ "SFC",
+ "DFC",
+#ifdef M68020
+ "CACR",
+#endif
+ "USP",
+ "VBR",
+#ifdef M68020
+ "CAAR",
+ "MSP",
+ "ISP"
+#endif
+};
+#endif
-int m68k_disasm_op(m68k_op_info *decoded, char *dst, int need_comma, uint8_t labels, uint32_t address)
+int m68k_disasm_op(m68k_op_info *decoded, char *dst, int need_comma, uint8_t labels, uint32_t address, format_label_fun label_fun, void * data)
{
char * c = need_comma ? "," : "";
- switch(decoded->addr_mode)
+ int ret = 0;
+#ifdef M68020
+ uint8_t addr_mode = decoded->addr_mode & (~M68K_FLAG_BITFIELD);
+#else
+ uint8_t addr_mode = decoded->addr_mode;
+#endif
+ switch(addr_mode)
{
case MODE_REG:
- return sprintf(dst, "%s d%d", c, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s d%d", c, decoded->params.regs.pri);
+ break;
case MODE_AREG:
- return sprintf(dst, "%s a%d", c, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s a%d", c, decoded->params.regs.pri);
+ break;
case MODE_AREG_INDIRECT:
- return sprintf(dst, "%s (a%d)", c, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s (a%d)", c, decoded->params.regs.pri);
+ break;
case MODE_AREG_POSTINC:
- return sprintf(dst, "%s (a%d)+", c, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s (a%d)+", c, decoded->params.regs.pri);
+ break;
case MODE_AREG_PREDEC:
- return sprintf(dst, "%s -(a%d)", c, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s -(a%d)", c, decoded->params.regs.pri);
+ break;
case MODE_AREG_DISPLACE:
- return sprintf(dst, "%s (%d, a%d)", c, decoded->params.regs.displacement, decoded->params.regs.pri);
+ ret = sprintf(dst, "%s (%d, a%d)", c, decoded->params.regs.displacement, decoded->params.regs.pri);
+ break;
case MODE_AREG_INDEX_DISP8:
- return sprintf(dst, "%s (%d, a%d, %c%d.%c)", c, decoded->params.regs.displacement, decoded->params.regs.pri, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w');
+#ifdef M68020
+ if (decoded->params.regs.scale)
+ {
+ ret = sprintf(dst, "%s (%d, a%d, %c%d.%c*%d)", c, decoded->params.regs.displacement, decoded->params.regs.pri, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+#endif
+ ret = sprintf(dst, "%s (%d, a%d, %c%d.%c)", c, decoded->params.regs.displacement, decoded->params.regs.pri, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w');
+#ifdef M68020
+ }
+#endif
+ break;
+#ifdef M68020
+ case MODE_AREG_INDEX_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, a%d, %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+ ret = sprintf(dst, "%s (a%d, %c%d.%c*%d)", c, decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ }
+ break;
+ case MODE_AREG_PREINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([a%d, %c%d.%c*%d])", c, decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, a%d, %c%d.%c*%d])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([a%d, %c%d.%c*%d], %d.%c)", c, decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, a%d, %c%d.%c*%d], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_AREG_POSTINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([a%d], %c%d.%c*%d)", c, decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, a%d], %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([a%d], %c%d.%c*%d, %d.%c)", c, decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, a%d], %c%d.%c*%d, %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_AREG_MEM_INDIRECT:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([a%d])", c, decoded->params.regs.pri);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, a%d])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([a%d], %d.%c)", c, decoded->params.regs.pri, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, a%d], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.pri,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_AREG_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, a%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes == 2 ? 'w' : 'l', decoded->params.regs.pri);
+ } else {
+ //this is a lossy representation of the encoded instruction
+ //not sure if there's a better way to print it though
+ ret = sprintf(dst, "%s (a%d)", c, decoded->params.regs.pri);
+ }
+ break;
+ case MODE_INDEX_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+ ret = sprintf(dst, "%s (%c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ }
+ break;
+ case MODE_PREINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([%c%d.%c*%d])", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, %c%d.%c*%d])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([%c%d.%c*%d], %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, %c%d.%c*%d], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_POSTINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([], %c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c], %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([], %c%d.%c*%d, %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c], %c%d.%c*%d, %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_MEM_INDIRECT:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([])", c);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([], %d.%c)", c, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ } else {
+ ret = sprintf(dst, "%s ()", c);
+ }
+ break;
+#endif
case MODE_IMMEDIATE:
case MODE_IMMEDIATE_WORD:
- return sprintf(dst, (decoded->params.immed <= 128 ? "%s #%d" : "%s #$%X"), c, decoded->params.immed);
+ ret = sprintf(dst, (decoded->params.immed <= 128 ? "%s #%d" : "%s #$%X"), c, decoded->params.immed);
+ break;
case MODE_ABSOLUTE_SHORT:
if (labels) {
- return sprintf(dst, "%s ADR_%X.w", c, decoded->params.immed);
+ ret = sprintf(dst, "%s ", c);
+ ret += label_fun(dst+ret, decoded->params.immed, data);
+ strcat(dst+ret, ".w");
+ ret = ret + 2;
} else {
- return sprintf(dst, "%s $%X.w", c, decoded->params.immed);
+ ret = sprintf(dst, "%s $%X.w", c, decoded->params.immed);
}
+ break;
case MODE_ABSOLUTE:
if (labels) {
- return sprintf(dst, "%s ADR_%X.l", c, decoded->params.immed);
+ ret = sprintf(dst, "%s ", c);
+ ret += label_fun(dst+ret, decoded->params.immed, data);
+ strcat(dst+ret, ".l");
+ ret = ret + 2;
} else {
- return sprintf(dst, "%s $%X", c, decoded->params.immed);
+ ret = sprintf(dst, "%s $%X", c, decoded->params.immed);
}
+ break;
case MODE_PC_DISPLACE:
if (labels) {
- return sprintf(dst, "%s ADR_%X(pc)", c, address + 2 + decoded->params.regs.displacement);
+ ret = sprintf(dst, "%s ", c);
+ ret += label_fun(dst+ret, address + 2 + decoded->params.regs.displacement, data);
+ strcat(dst+ret, "(pc)");
+ ret = ret + 4;
} else {
- return sprintf(dst, "%s (%d, pc)", c, decoded->params.regs.displacement);
+ ret = sprintf(dst, "%s (%d, pc)", c, decoded->params.regs.displacement);
}
+ break;
case MODE_PC_INDEX_DISP8:
- return sprintf(dst, "%s (%d, pc, %c%d.%c)", c, decoded->params.regs.displacement, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w');
+#ifdef M68020
+ if (decoded->params.regs.scale)
+ {
+ ret = sprintf(dst, "%s (%d, pc, %c%d.%c*%d)", c, decoded->params.regs.displacement, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+#endif
+ ret = sprintf(dst, "%s (%d, pc, %c%d.%c)", c, decoded->params.regs.displacement, (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w');
+#ifdef M68020
+ }
+#endif
+ break;
+#ifdef M68020
+ case MODE_PC_INDEX_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, pc, %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+ ret = sprintf(dst, "%s (pc, %c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ }
+ break;
+ case MODE_PC_PREINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([pc, %c%d.%c*%d])", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, pc, %c%d.%c*%d])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([pc, %c%d.%c*%d], %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, pc, %c%d.%c*%d], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_PC_POSTINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([pc], %c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, pc], %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([pc], %c%d.%c*%d, %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, pc], %c%d.%c*%d, %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_PC_MEM_INDIRECT:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([pc])", c);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, pc])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([pc], %d.%c)", c, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, pc], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_PC_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, pc)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ } else {
+ ret = sprintf(dst, "%s (pc)", c);
+ }
+ break;
+ case MODE_ZPC_INDEX_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, zpc, %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ } else {
+ ret = sprintf(dst, "%s (zpc, %c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ }
+ break;
+ case MODE_ZPC_PREINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([zpc, %c%d.%c*%d])", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, zpc, %c%d.%c*%d])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([zpc, %c%d.%c*%d], %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, zpc, %c%d.%c*%d], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_ZPC_POSTINDEX:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([zpc], %c%d.%c*%d)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, zpc], %c%d.%c*%d)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale);
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([zpc], %c%d.%c*%d, %d.%c)", c, (decoded->params.regs.sec & 0x10) ? 'a': 'd',
+ (decoded->params.regs.sec >> 1) & 0x7, (decoded->params.regs.sec & 1) ? 'l': 'w',
+ 1 << decoded->params.regs.scale, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, zpc], %c%d.%c*%d, %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l',
+ (decoded->params.regs.sec & 0x10) ? 'a': 'd', (decoded->params.regs.sec >> 1) & 0x7,
+ (decoded->params.regs.sec & 1) ? 'l': 'w', 1 << decoded->params.regs.scale,
+ decoded->params.regs.outer_disp, decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_ZPC_MEM_INDIRECT:
+ switch (decoded->params.regs.disp_sizes)
+ {
+ case 0x11:
+ //no base displacement or outer displacement
+ ret = sprintf(dst, "%s ([zpc])", c);
+ break;
+ case 0x12:
+ case 0x13:
+ //base displacement only
+ ret = sprintf(dst, "%s ([%d.%c, zpc])", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ break;
+ case 0x21:
+ case 0x31:
+ //outer displacement only
+ ret = sprintf(dst, "%s ([zpc], %d.%c)", c, decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ case 0x22:
+ case 0x23:
+ case 0x32:
+ case 0x33:
+ //both outer and inner displacement
+ ret = sprintf(dst, "%s ([%d.%c, zpc], %d.%c)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l', decoded->params.regs.outer_disp,
+ decoded->params.regs.disp_sizes & 0x30 == 0x20 ? 'w' : 'l');
+ break;
+ }
+ break;
+ case MODE_ZPC_BASE_DISP:
+ if (decoded->params.regs.disp_sizes > 1)
+ {
+ ret = sprintf(dst, "%s (%d.%c, zpc)", c, decoded->params.regs.displacement,
+ decoded->params.regs.disp_sizes & 3 == 2 ? 'w' : 'l');
+ } else {
+ ret = sprintf(dst, "%s (zpc)", c);
+ }
+ break;
+#endif
default:
- return 0;
+ ret = 0;
}
+#ifdef M68020
+ if (decoded->addr_mode & M68K_FLAG_BITFIELD)
+ {
+ switch (decoded->bitfield & 0x820)
+ {
+ case 0:
+ return ret + sprintf(dst+ret, " {$%X:%d}", decoded->bitfield >> 6 & 0x1F, decoded->bitfield & 0x1F ? decoded->bitfield & 0x1F : 32);
+ case 0x20:
+ return ret + sprintf(dst+ret, " {$%X:d%d}", decoded->bitfield >> 6 & 0x1F, decoded->bitfield & 0x7);
+ case 0x800:
+ return ret + sprintf(dst+ret, " {d%d:%d}", decoded->bitfield >> 6 & 0x7, decoded->bitfield & 0x1F ? decoded->bitfield & 0x1F : 32);
+ case 0x820:
+ return ret + sprintf(dst+ret, " {d%d:d%d}", decoded->bitfield >> 6 & 0x7, decoded->bitfield & 0x7);
+ }
+ }
+#endif
+ return ret;
}
-int m68k_disasm_movem_op(m68k_op_info *decoded, m68k_op_info *other, char *dst, int need_comma, uint8_t labels, uint32_t address)
+int m68k_disasm_movem_op(m68k_op_info *decoded, m68k_op_info *other, char *dst, int need_comma, uint8_t labels, uint32_t address, format_label_fun label_fun, void * data)
{
int8_t dir, reg, bit, regnum, last=-1, lastreg, first=-1;
char *rtype, *last_rtype;
@@ -1539,11 +2427,16 @@ int m68k_disasm_movem_op(m68k_op_info *decoded, m68k_op_info *other, char *dst,
}
return oplen;
} else {
- return m68k_disasm_op(decoded, dst, need_comma, labels, address);
+ return m68k_disasm_op(decoded, dst, need_comma, labels, address, label_fun, data);
}
}
-int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
+int m68k_default_label_fun(char * dst, uint32_t address, void * data)
+{
+ return sprintf(dst, "ADR_%X", address);
+}
+
+int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels, format_label_fun label_fun, void * data)
{
int ret,op1len;
uint8_t size;
@@ -1561,9 +2454,11 @@ int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
if (decoded->op != M68K_SCC) {
if (labels) {
if (decoded->op == M68K_DBCC) {
- ret += sprintf(dst+ret, " d%d, ADR_%X", decoded->dst.params.regs.pri, decoded->address + 2 + decoded->src.params.immed);
+ ret += sprintf(dst+ret, " d%d, ", decoded->dst.params.regs.pri);
+ ret += label_fun(dst+ret, decoded->address + 2 + decoded->src.params.immed, data);
} else {
- ret += sprintf(dst+ret, " ADR_%X", decoded->address + 2 + decoded->src.params.immed);
+ dst[ret++] = ' ';
+ ret += label_fun(dst+ret, decoded->address + 2 + decoded->src.params.immed, data);
}
} else {
if (decoded->op == M68K_DBCC) {
@@ -1577,8 +2472,8 @@ int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
break;
case M68K_BSR:
if (labels) {
- ret = sprintf(dst, "bsr%s ADR_%X", decoded->variant == VAR_BYTE ? ".s" : "",
- decoded->address + 2 + decoded->src.params.immed);
+ ret = sprintf(dst, "bsr%s ", decoded->variant == VAR_BYTE ? ".s" : "");
+ ret += label_fun(dst+ret, decoded->address + 2 + decoded->src.params.immed, data);
} else {
ret = sprintf(dst, "bsr%s #%d <%X>", decoded->variant == VAR_BYTE ? ".s" : "", decoded->src.params.immed, decoded->address + 2 + decoded->src.params.immed);
}
@@ -1586,7 +2481,7 @@ int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
case M68K_MOVE_FROM_SR:
ret = sprintf(dst, "%s", mnemonics[decoded->op]);
ret += sprintf(dst + ret, " SR");
- ret += m68k_disasm_op(&(decoded->dst), dst + ret, 1, labels, decoded->address);
+ ret += m68k_disasm_op(&(decoded->dst), dst + ret, 1, labels, decoded->address, label_fun, data);
return ret;
case M68K_ANDI_SR:
case M68K_EORI_SR:
@@ -1598,19 +2493,34 @@ int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
case M68K_MOVE_CCR:
case M68K_ORI_CCR:
ret = sprintf(dst, "%s", mnemonics[decoded->op]);
- ret += m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address);
+ ret += m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address, label_fun, data);
ret += sprintf(dst + ret, ", %s", special_op);
return ret;
case M68K_MOVE_USP:
ret = sprintf(dst, "%s", mnemonics[decoded->op]);
if (decoded->src.addr_mode != MODE_UNUSED) {
- ret += m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address);
+ ret += m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address, label_fun, data);
ret += sprintf(dst + ret, ", USP");
} else {
ret += sprintf(dst + ret, "USP, ");
- ret += m68k_disasm_op(&(decoded->dst), dst + ret, 0, labels, decoded->address);
+ ret += m68k_disasm_op(&(decoded->dst), dst + ret, 0, labels, decoded->address, label_fun, data);
}
return ret;
+ case M68K_INVALID:
+ ret = sprintf(dst, "dc.w $%X", decoded->src.params.immed);
+ return ret;
+#ifdef M68010
+ case M68K_MOVEC:
+ ret = sprintf(dst, "%s ", mnemonics[decoded->op]);
+ if (decoded->src.addr_mode == MODE_UNUSED) {
+ ret += sprintf(dst + ret, "%s, ", cr_mnem[decoded->src.params.immed]);
+ ret += m68k_disasm_op(&(decoded->dst), dst + ret, 0, labels, decoded->address, label_fun, data);
+ } else {
+ ret += m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address, label_fun, data);
+ ret += sprintf(dst + ret, ", %s", cr_mnem[decoded->dst.params.immed]);
+ }
+ return ret;
+#endif
default:
size = decoded->extra.size;
ret = sprintf(dst, "%s%s%s",
@@ -1619,23 +2529,27 @@ int m68k_disasm_ex(m68kinst * decoded, char * dst, uint8_t labels)
size == OPSIZE_BYTE ? ".b" : (size == OPSIZE_WORD ? ".w" : (size == OPSIZE_LONG ? ".l" : "")));
}
if (decoded->op == M68K_MOVEM) {
- op1len = m68k_disasm_movem_op(&(decoded->src), &(decoded->dst), dst + ret, 0, labels, decoded->address);
+ op1len = m68k_disasm_movem_op(&(decoded->src), &(decoded->dst), dst + ret, 0, labels, decoded->address, label_fun, data);
ret += op1len;
- ret += m68k_disasm_movem_op(&(decoded->dst), &(decoded->src), dst + ret, op1len, labels, decoded->address);
+ ret += m68k_disasm_movem_op(&(decoded->dst), &(decoded->src), dst + ret, op1len, labels, decoded->address, label_fun, data);
} else {
- op1len = m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address);
+ op1len = m68k_disasm_op(&(decoded->src), dst + ret, 0, labels, decoded->address, label_fun, data);
ret += op1len;
- ret += m68k_disasm_op(&(decoded->dst), dst + ret, op1len, labels, decoded->address);
+ ret += m68k_disasm_op(&(decoded->dst), dst + ret, op1len, labels, decoded->address, label_fun, data);
}
return ret;
}
int m68k_disasm(m68kinst * decoded, char * dst)
{
- return m68k_disasm_ex(decoded, dst, 0);
+ return m68k_disasm_ex(decoded, dst, 0, NULL, NULL);
}
-int m68k_disasm_labels(m68kinst * decoded, char * dst)
+int m68k_disasm_labels(m68kinst * decoded, char * dst, format_label_fun label_fun, void * data)
{
- return m68k_disasm_ex(decoded, dst, 1);
+ if (!label_fun)
+ {
+ label_fun = m68k_default_label_fun;
+ }
+ return m68k_disasm_ex(decoded, dst, 1, label_fun, data);
}
diff --git a/68kinst.h b/68kinst.h
index 768793f..0d5072f 100644
--- a/68kinst.h
+++ b/68kinst.h
@@ -8,6 +8,13 @@
#include <stdint.h>
+#ifdef M68030
+#define M68020
+#endif
+#ifdef M68020
+#define M68010
+#endif
+
typedef enum {
BIT_MOVEP_IMMED = 0,
MOVE_BYTE,
@@ -97,7 +104,43 @@ typedef enum {
M68K_TRAPV,
M68K_TST,
M68K_UNLK,
- M68K_INVALID
+ M68K_INVALID,
+#ifdef M68010
+ M68K_BKPT,
+ M68K_MOVE_FROM_CCR,
+ M68K_MOVEC,
+ M68K_MOVES,
+ M68K_RTD,
+#endif
+#ifdef M68020
+ M68K_BFCHG,
+ M68K_BFCLR,
+ M68K_BFEXTS,
+ M68K_BFEXTU,
+ M68K_BFFFO,
+ M68K_BFINS,
+ M68K_BFSET,
+ M68K_BFTST,
+ M68K_CALLM,
+ M68K_CAS,
+ M68K_CAS2,
+ M68K_CHK2,
+ M68K_CMP2,
+ M68K_CP_BCC,
+ M68K_CP_DBCC,
+ M68K_CP_GEN,
+ M68K_CP_RESTORE,
+ M68K_CP_SAVE,
+ M68K_CP_SCC,
+ M68K_CP_TRAPCC,
+ M68K_DIVSL,
+ M68K_DIVUL,
+ M68K_EXTB,
+ M68K_PACK,
+ M68K_RTM,
+ M68K_TRAPCC,
+ M68K_UNPK,
+#endif
} m68K_op;
typedef enum {
@@ -130,19 +173,40 @@ typedef enum {
//expanded values
MODE_AREG_INDEX_DISP8,
#ifdef M68020
- MODE_AREG_INDEX_DISP32,
+ MODE_AREG_INDEX_BASE_DISP,
+ MODE_AREG_PREINDEX,
+ MODE_AREG_POSTINDEX,
+ MODE_AREG_MEM_INDIRECT,
+ MODE_AREG_BASE_DISP,
+ MODE_INDEX_BASE_DISP,
+ MODE_PREINDEX,
+ MODE_POSTINDEX,
+ MODE_MEM_INDIRECT,
+ MODE_BASE_DISP,
#endif
MODE_ABSOLUTE_SHORT,
MODE_ABSOLUTE,
MODE_PC_DISPLACE,
MODE_PC_INDEX_DISP8,
#ifdef M68020
- MODE_PC_INDEX_DISP32,
+ MODE_PC_INDEX_BASE_DISP,
+ MODE_PC_PREINDEX,
+ MODE_PC_POSTINDEX,
+ MODE_PC_MEM_INDIRECT,
+ MODE_PC_BASE_DISP,
+ MODE_ZPC_INDEX_BASE_DISP,
+ MODE_ZPC_PREINDEX,
+ MODE_ZPC_POSTINDEX,
+ MODE_ZPC_MEM_INDIRECT,
+ MODE_ZPC_BASE_DISP,
#endif
MODE_IMMEDIATE,
MODE_IMMEDIATE_WORD,//used to indicate an immediate operand that only uses a single extension word even for a long operation
MODE_UNUSED
} m68k_addr_modes;
+#ifdef M68020
+#define M68K_FLAG_BITFIELD 0x80
+#endif
typedef enum {
COND_TRUE,
@@ -163,13 +227,49 @@ typedef enum {
COND_LESS_EQ
} m68K_condition;
+#ifdef M68010
+typedef enum {
+ CR_SFC,
+ CR_DFC,
+#ifdef M68020
+ CR_CACR,
+#endif
+ CR_USP,
+ CR_VBR,
+#ifdef M68020
+ CR_CAAR,
+ CR_MSP,
+ CR_ISP
+#endif
+} m68k_control_reg;
+
+#ifdef M68020
+#define MAX_HIGH_CR 0x804
+#define MAX_LOW_CR 0x002
+#else
+#define MAX_HIGH_CR 0x801
+#define MAX_LOW_CR 0x001
+#endif
+
+#endif
+
typedef struct {
- uint8_t addr_mode;
+#ifdef M68020
+ uint16_t bitfield;
+#endif
+ uint8_t addr_mode;
union {
struct {
uint8_t pri;
uint8_t sec;
+#ifdef M68020
+ uint8_t scale;
+ uint8_t disp_sizes;
+#endif
int32_t displacement;
+#ifdef M68020
+ int32_t outer_disp;
+#endif
} regs;
uint32_t immed;
} params;
@@ -229,12 +329,15 @@ typedef enum {
VECTOR_TRAP_15
} m68k_vector;
+typedef int (*format_label_fun)(char * dst, uint32_t address, void * data);
+
uint16_t * m68k_decode(uint16_t * istream, m68kinst * dst, uint32_t address);
uint32_t m68k_branch_target(m68kinst * inst, uint32_t *dregs, uint32_t *aregs);
uint8_t m68k_is_branch(m68kinst * inst);
uint8_t m68k_is_noncall_branch(m68kinst * inst);
int m68k_disasm(m68kinst * decoded, char * dst);
-int m68k_disasm_labels(m68kinst * decoded, char * dst);
+int m68k_disasm_labels(m68kinst * decoded, char * dst, format_label_fun label_fun, void * data);
+int m68k_default_label_fun(char * dst, uint32_t address, void * data);
#endif
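
The disassembler entry points above now take a format_label_fun callback plus an opaque data pointer, so callers can substitute their own symbol names for the generated ADR_XXXXXX labels. A minimal sketch of such a callback follows; the vector_names table and the function name are illustrative assumptions, not part of this changeset.

#include <stdio.h>
#include <stdint.h>
#include "68kinst.h"

//hypothetical formatter: use a symbolic name for addresses inside the
//exception vector area, otherwise defer to the stock ADR_XXXXXX label
static int vector_label_fun(char *dst, uint32_t address, void *data)
{
	const char **vector_names = data;
	if (address < 0x100 && vector_names[address >> 2]) {
		return sprintf(dst, "%s", vector_names[address >> 2]);
	}
	return m68k_default_label_fun(dst, address, NULL);
}

//usage: m68k_disasm_labels(&inst, buf, vector_label_fun, vector_names);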
diff --git a/Makefile b/Makefile
index 0eb57cd..784ac8f 100644
--- a/Makefile
+++ b/Makefile
@@ -12,11 +12,12 @@ LIBS=sdl glew gl
endif
endif
-LDFLAGS:=-lm $(shell pkg-config --libs $(LIBS))
ifdef DEBUG
-CFLAGS:=-ggdb -std=gnu99 $(shell pkg-config --cflags-only-I $(LIBS)) -Wreturn-type -Werror=return-type
+CFLAGS:=-ggdb -std=gnu99 $(shell pkg-config --cflags-only-I $(LIBS)) -Wreturn-type -Werror=return-type -Werror=implicit-function-declaration
+LDFLAGS:=-ggdb -lm $(shell pkg-config --libs $(LIBS))
else
-CFLAGS:=-O2 -std=gnu99 $(shell pkg-config --cflags-only-I $(LIBS)) -Wreturn-type -Werror=return-type
+CFLAGS:=-O2 -flto -std=gnu99 $(shell pkg-config --cflags-only-I $(LIBS)) -Wreturn-type -Werror=return-type -Werror=implicit-function-declaration
+LDFLAGS:=-O2 -flto -lm $(shell pkg-config --libs $(LIBS))
endif
ifdef PROFILE
@@ -27,6 +28,16 @@ ifdef NOGL
CFLAGS+= -DDISABLE_OPENGL
endif
+ifdef M68030
+CFLAGS+= -DM68030
+endif
+ifdef M68020
+CFLAGS+= -DM68020
+endif
+ifdef M68010
+CFLAGS+= -DM68010
+endif
+
ifndef CPU
CPU:=$(shell uname -m)
endif
@@ -35,17 +46,19 @@ ifeq ($(OS),Darwin)
LDFLAGS+= -framework OpenGL
endif
-TRANSOBJS=gen_x86.o backend.o mem.o
-M68KOBJS=68kinst.o m68k_to_x86.o
+TRANSOBJS=gen.o backend.o mem.o
+M68KOBJS=68kinst.o m68k_core.o
ifeq ($(CPU),x86_64)
-M68KOBJS+= runtime.o
+M68KOBJS+= runtime.o m68k_core_x86.o
+TRANSOBJS+= gen_x86.o backend_x86.o
else
ifeq ($(CPU),i686)
-M68KOBJS+= runtime_32.o
+M68KOBJS+= runtime_32.o m68k_core_x86.o
+TRANSOBJS+= gen_x86.o backend_x86.o
endif
endif
-Z80OBJS=z80inst.o z80_to_x86.o zruntime.o
+Z80OBJS=z80inst.o z80_to_x86.o
AUDIOOBJS=ym2612.o psg.o wave.o
CONFIGOBJS=config.o tern.o util.o
@@ -53,21 +66,26 @@ MAINOBJS=blastem.o debug.o gdb_remote.o vdp.o render_sdl.o io.o $(CONFIGOBJS) gs
ifeq ($(CPU),x86_64)
CFLAGS+=-DX86_64
-MAINOBJS+= $(Z80OBJS)
else
ifeq ($(CPU),i686)
CFLAGS+=-DX86_32
endif
endif
+ifdef NOZ80
+CFLAGS+=-DNO_Z80
+else
+MAINOBJS+= $(Z80OBJS)
+endif
+
all : dis zdis stateview vgmplay blastem
blastem : $(MAINOBJS)
- $(CC) -ggdb -o blastem $(MAINOBJS) $(LDFLAGS)
+ $(CC) -o blastem $(MAINOBJS) $(LDFLAGS)
-dis : dis.o 68kinst.o
- $(CC) -o dis dis.o 68kinst.o
+dis : dis.o 68kinst.o tern.o vos_program_module.o
+ $(CC) -o dis dis.o 68kinst.o tern.o vos_program_module.o
zdis : zdis.o z80inst.o
$(CC) -o zdis zdis.o z80inst.o
@@ -96,18 +114,21 @@ vgmplay : vgmplay.o render_sdl.o $(CONFIGOBJS) $(AUDIOOBJS)
testgst : testgst.o gst.o
$(CC) -o testgst testgst.o gst.o
-test_x86 : test_x86.o gen_x86.o
- $(CC) -o test_x86 test_x86.o gen_x86.o
+test_x86 : test_x86.o gen_x86.o gen.o
+ $(CC) -o test_x86 test_x86.o gen_x86.o gen.o
-test_arm : test_arm.o gen_arm.o mem.o
- $(CC) -o test_arm test_arm.o gen_arm.o mem.o
+test_arm : test_arm.o gen_arm.o mem.o gen.o
+ $(CC) -o test_arm test_arm.o gen_arm.o mem.o gen.o
gen_fib : gen_fib.o gen_x86.o mem.o
$(CC) -o gen_fib gen_fib.o gen_x86.o mem.o
-offsets : offsets.c z80_to_x86.h m68k_to_x86.h
+offsets : offsets.c z80_to_x86.h m68k_core.h
$(CC) -o offsets offsets.c
+vos_prog_info : vos_prog_info.o vos_program_module.o
+ $(CC) -o vos_prog_info vos_prog_info.o vos_program_module.o
+
%.o : %.S
$(CC) -c -o $@ $<
@@ -115,7 +136,7 @@ offsets : offsets.c z80_to_x86.h m68k_to_x86.h
$(CC) $(CFLAGS) -c -o $@ $<
%.bin : %.s68
- vasmm68k_mot -Fbin -m68000 -no-opt -spaces -o $@ $<
+ vasmm68k_mot -Fbin -m68000 -no-opt -spaces -o $@ -L $@.list $<
%.bin : %.sz8
vasmz80_mot -Fbin -spaces -o $@ $<
diff --git a/backend.c b/backend.c
index eaae9d7..8f65f25 100644
--- a/backend.c
+++ b/backend.c
@@ -51,3 +51,24 @@ void process_deferred(deferred_addr ** head_ptr, void * context, native_addr_fun
}
}
+void * get_native_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts)
+{
+ memmap_chunk const * memmap = opts->memmap;
+ address &= opts->address_mask;
+ for (uint32_t chunk = 0; chunk < opts->memmap_chunks; chunk++)
+ {
+ if (address >= memmap[chunk].start && address < memmap[chunk].end) {
+ if (!(memmap[chunk].flags & MMAP_READ)) {
+ return NULL;
+ }
+ uint8_t * base = memmap[chunk].flags & MMAP_PTR_IDX
+ ? mem_pointers[memmap[chunk].ptr_index]
+ : memmap[chunk].buffer;
+ if (!base) {
+ return NULL;
+ }
+ return base + (address & memmap[chunk].mask);
+ }
+ }
+ return NULL;
+}
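
get_native_pointer is the new generic way to turn a guest address into a host pointer by walking the memory map, returning NULL for regions that have no directly readable buffer. A sketch of the intended use, mirroring the z80_write_bank_reg call site later in this changeset; cache_bank_pointer is an illustrative name, not part of the patch.

#include "blastem.h"
#include "backend.h"

//hypothetical helper: resolve the 32KB Z80 banked window to a host
//pointer when possible, so byte reads can skip the handler path
uint8_t *cache_bank_pointer(genesis_context *gen, uint16_t bank_reg)
{
	//banks below 0x80 select 68K addresses under $400000 (cartridge),
	//which have a direct buffer; other regions return NULL and must go
	//through the generated read/write handlers instead
	if (bank_reg >= 0x80) {
		return NULL;
	}
	return get_native_pointer((uint32_t)bank_reg << 15,
		(void **)gen->m68k->mem_pointers, &gen->m68k->options->gen);
}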
diff --git a/backend.h b/backend.h
index d35a1ec..c3ae875 100644
--- a/backend.h
+++ b/backend.h
@@ -8,22 +8,25 @@
#include <stdint.h>
#include <stdio.h>
+#include "gen.h"
#define INVALID_OFFSET 0xFFFFFFFF
#define EXTENSION_WORD 0xFFFFFFFE
+#define CYCLE_NEVER 0xFFFFFFFF
+#if defined(X86_32) || defined(X86_64)
typedef struct {
int32_t disp;
uint8_t mode;
uint8_t base;
uint8_t index;
- uint8_t cycles;
-} x86_ea;
-
-#if defined(X86_64) || defined(X86_32)
-typedef uint8_t* code_ptr;
+} host_ea;
#else
-typedef uint32_t* code_ptr;
+typedef struct {
+ int32_t disp;
+ uint8_t mode;
+ uint8_t base;
+} host_ea;
#endif
typedef struct {
@@ -44,23 +47,6 @@ typedef enum {
WRITE_8
} ftype;
-typedef struct {
- uint32_t flags;
- native_map_slot *native_code_map;
- deferred_addr *deferred;
- code_ptr cur_code;
- code_ptr code_end;
- uint8_t **ram_inst_sizes;
- code_ptr save_context;
- code_ptr load_context;
- code_ptr handle_cycle_limit;
- code_ptr handle_cycle_limit_int;
- uint8_t context_reg;
- uint8_t scratch1;
- uint8_t scratch2;
-} cpu_options;
-
-
#define MMAP_READ 0x01
#define MMAP_WRITE 0x02
#define MMAP_CODE 0x04
@@ -68,6 +54,7 @@ typedef struct {
#define MMAP_ONLY_ODD 0x10
#define MMAP_ONLY_EVEN 0x20
#define MMAP_FUNC_NULL 0x40
+#define MMAP_BYTESWAP 0x80
typedef uint16_t (*read_16_fun)(uint32_t address, void * context);
typedef uint8_t (*read_8_fun)(uint32_t address, void * context);
@@ -87,11 +74,48 @@ typedef struct {
write_8_fun write_8;
} memmap_chunk;
+typedef struct {
+ uint32_t flags;
+ native_map_slot *native_code_map;
+ deferred_addr *deferred;
+ code_info code;
+ uint8_t **ram_inst_sizes;
+ memmap_chunk const *memmap;
+ code_ptr save_context;
+ code_ptr load_context;
+ code_ptr handle_cycle_limit;
+ code_ptr handle_cycle_limit_int;
+ code_ptr handle_code_write;
+ uint32_t memmap_chunks;
+ uint32_t address_mask;
+ uint32_t max_address;
+ uint32_t bus_cycles;
+ uint32_t clock_divider;
+ int32_t mem_ptr_off;
+ int32_t ram_flags_off;
+ uint8_t ram_flags_shift;
+ uint8_t address_size;
+ uint8_t byte_swap;
+ uint8_t context_reg;
+ uint8_t cycles;
+ uint8_t limit;
+ uint8_t scratch1;
+ uint8_t scratch2;
+} cpu_options;
+
typedef uint8_t * (*native_addr_func)(void * context, uint32_t address);
deferred_addr * defer_address(deferred_addr * old_head, uint32_t address, uint8_t *dest);
void remove_deferred_until(deferred_addr **head_ptr, deferred_addr * remove_to);
void process_deferred(deferred_addr ** head_ptr, void * context, native_addr_func get_native);
+void cycles(cpu_options *opts, uint32_t num);
+void check_cycles_int(cpu_options *opts, uint32_t address);
+void check_cycles(cpu_options * opts);
+void check_code_prologue(code_info *code);
+
+code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk const * memmap, uint32_t num_chunks, ftype fun_type, code_ptr *after_inc);
+void * get_native_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts);
+
#endif //BACKEND_H_
diff --git a/backend_x86.c b/backend_x86.c
new file mode 100644
index 0000000..7c289de
--- /dev/null
+++ b/backend_x86.c
@@ -0,0 +1,219 @@
+#include "backend.h"
+#include "gen_x86.h"
+
+void cycles(cpu_options *opts, uint32_t num)
+{
+ add_ir(&opts->code, num*opts->clock_divider, opts->cycles, SZ_D);
+}
+
+void check_cycles_int(cpu_options *opts, uint32_t address)
+{
+ code_info *code = &opts->code;
+ cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+ code_ptr jmp_off = code->cur+1;
+ jcc(code, CC_NC, jmp_off+1);
+ mov_ir(code, address, opts->scratch1, SZ_D);
+ call(code, opts->handle_cycle_limit_int);
+ *jmp_off = code->cur - (jmp_off+1);
+}
+
+void check_cycles(cpu_options * opts)
+{
+ code_info *code = &opts->code;
+ cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+ check_alloc_code(code, MAX_INST_LEN*2);
+ code_ptr jmp_off = code->cur+1;
+ jcc(code, CC_NC, jmp_off+1);
+ call(code, opts->handle_cycle_limit);
+ *jmp_off = code->cur - (jmp_off+1);
+}
+
+void check_code_prologue(code_info *code)
+{
+ check_alloc_code(code, MAX_INST_LEN*4);
+}
+
+code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk const * memmap, uint32_t num_chunks, ftype fun_type, code_ptr *after_inc)
+{
+ code_info *code = &opts->code;
+ code_ptr start = code->cur;
+ check_cycles(opts);
+ cycles(opts, opts->bus_cycles);
+ if (after_inc) {
+ *after_inc = code->cur;
+ }
+ if (opts->address_size == SZ_D && opts->address_mask < 0xFFFFFFFF) {
+ and_ir(code, opts->address_mask, opts->scratch1, SZ_D);
+ }
+ code_ptr lb_jcc = NULL, ub_jcc = NULL;
+ uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8;
+ uint8_t adr_reg = is_write ? opts->scratch2 : opts->scratch1;
+ uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ;
+ uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B;
+ for (uint32_t chunk = 0; chunk < num_chunks; chunk++)
+ {
+ if (memmap[chunk].start > 0) {
+ cmp_ir(code, memmap[chunk].start, adr_reg, opts->address_size);
+ lb_jcc = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ }
+ if (memmap[chunk].end < opts->max_address) {
+ cmp_ir(code, memmap[chunk].end, adr_reg, opts->address_size);
+ ub_jcc = code->cur + 1;
+ jcc(code, CC_NC, code->cur + 2);
+ }
+
+ if (memmap[chunk].mask != opts->address_mask) {
+ and_ir(code, memmap[chunk].mask, adr_reg, opts->address_size);
+ }
+ void * cfun;
+ switch (fun_type)
+ {
+ case READ_16:
+ cfun = memmap[chunk].read_16;
+ break;
+ case READ_8:
+ cfun = memmap[chunk].read_8;
+ break;
+ case WRITE_16:
+ cfun = memmap[chunk].write_16;
+ break;
+ case WRITE_8:
+ cfun = memmap[chunk].write_8;
+ break;
+ default:
+ cfun = NULL;
+ }
+ if(memmap[chunk].flags & access_flag) {
+ if (memmap[chunk].flags & MMAP_PTR_IDX) {
+ if (memmap[chunk].flags & MMAP_FUNC_NULL) {
+ cmp_irdisp(code, 0, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR);
+ code_ptr not_null = code->cur + 1;
+ jcc(code, CC_NZ, code->cur + 2);
+ call(code, opts->save_context);
+ if (is_write) {
+ call_args_abi(code, cfun, 3, opts->scratch2, opts->context_reg, opts->scratch1);
+ mov_rr(code, RAX, opts->context_reg, SZ_PTR);
+ } else {
+ push_r(code, opts->context_reg);
+ call_args_abi(code, cfun, 2, opts->scratch1, opts->context_reg);
+ pop_r(code, opts->context_reg);
+ mov_rr(code, RAX, opts->scratch1, size);
+ }
+ jmp(code, opts->load_context);
+
+ *not_null = code->cur - (not_null + 1);
+ }
+ if ((opts->byte_swap || memmap[chunk].flags & MMAP_BYTESWAP) && size == SZ_B) {
+ xor_ir(code, 1, adr_reg, opts->address_size);
+ }
+ if (opts->address_size != SZ_D) {
+ movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D);
+ }
+ add_rdispr(code, opts->context_reg, opts->mem_ptr_off + sizeof(void*) * memmap[chunk].ptr_index, adr_reg, SZ_PTR);
+ if (is_write) {
+ mov_rrind(code, opts->scratch1, opts->scratch2, size);
+
+ } else {
+ mov_rindr(code, opts->scratch1, opts->scratch1, size);
+ }
+ } else {
+ uint8_t tmp_size = size;
+ if (size == SZ_B) {
+ if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
+ bt_ir(code, 0, adr_reg, opts->address_size);
+ code_ptr good_addr = code->cur + 1;
+ jcc(code, (memmap[chunk].flags & MMAP_ONLY_ODD) ? CC_C : CC_NC, code->cur + 2);
+ if (!is_write) {
+ mov_ir(code, 0xFF, opts->scratch1, SZ_B);
+ }
+ retn(code);
+ *good_addr = code->cur - (good_addr + 1);
+ shr_ir(code, 1, adr_reg, opts->address_size);
+ } else if (opts->byte_swap || memmap[chunk].flags & MMAP_BYTESWAP) {
+ xor_ir(code, 1, adr_reg, opts->address_size);
+ }
+ } else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
+ tmp_size = SZ_B;
+ shr_ir(code, 1, adr_reg, opts->address_size);
+ if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) {
+ shr_ir(code, 8, opts->scratch1, SZ_W);
+ }
+ }
+ if (opts->address_size != SZ_D) {
+ movzx_rr(code, adr_reg, adr_reg, opts->address_size, SZ_D);
+ }
+ if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) {
+ if (is_write) {
+ mov_rrdisp(code, opts->scratch1, opts->scratch2, (intptr_t)memmap[chunk].buffer, tmp_size);
+ } else {
+ mov_rdispr(code, opts->scratch1, (intptr_t)memmap[chunk].buffer, opts->scratch1, tmp_size);
+ }
+ } else {
+ if (is_write) {
+ push_r(code, opts->scratch1);
+ mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch1, SZ_PTR);
+ add_rr(code, opts->scratch1, opts->scratch2, SZ_PTR);
+ pop_r(code, opts->scratch1);
+ mov_rrind(code, opts->scratch1, opts->scratch2, tmp_size);
+ } else {
+ mov_ir(code, (intptr_t)memmap[chunk].buffer, opts->scratch2, SZ_PTR);
+ mov_rindexr(code, opts->scratch2, opts->scratch1, 1, opts->scratch1, tmp_size);
+ }
+ }
+ if (size != tmp_size && !is_write) {
+ if (memmap[chunk].flags & MMAP_ONLY_EVEN) {
+ shl_ir(code, 8, opts->scratch1, SZ_W);
+ mov_ir(code, 0xFF, opts->scratch1, SZ_B);
+ } else {
+ or_ir(code, 0xFF00, opts->scratch1, SZ_W);
+ }
+ }
+ }
+ if (is_write && (memmap[chunk].flags & MMAP_CODE)) {
+ mov_rr(code, opts->scratch2, opts->scratch1, opts->address_size);
+ shr_ir(code, opts->ram_flags_shift, opts->scratch1, opts->address_size);
+ bt_rrdisp(code, opts->scratch1, opts->context_reg, opts->ram_flags_off, opts->address_size);
+ code_ptr not_code = code->cur + 1;
+ jcc(code, CC_NC, code->cur + 2);
+ call(code, opts->save_context);
+ call_args(code, opts->handle_code_write, 2, opts->scratch2, opts->context_reg);
+ mov_rr(code, RAX, opts->context_reg, SZ_PTR);
+ call(code, opts->load_context);
+ *not_code = code->cur - (not_code+1);
+ }
+ retn(code);
+ } else if (cfun) {
+ call(code, opts->save_context);
+ if (is_write) {
+ call_args_abi(code, cfun, 3, opts->scratch2, opts->context_reg, opts->scratch1);
+ mov_rr(code, RAX, opts->context_reg, SZ_PTR);
+ } else {
+ push_r(code, opts->context_reg);
+ call_args_abi(code, cfun, 2, opts->scratch1, opts->context_reg);
+ pop_r(code, opts->context_reg);
+ mov_rr(code, RAX, opts->scratch1, size);
+ }
+ jmp(code, opts->load_context);
+ } else {
+ //Not sure the best course of action here
+ if (!is_write) {
+ mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
+ }
+ retn(code);
+ }
+ if (lb_jcc) {
+ *lb_jcc = code->cur - (lb_jcc+1);
+ lb_jcc = NULL;
+ }
+ if (ub_jcc) {
+ *ub_jcc = code->cur - (ub_jcc+1);
+ ub_jcc = NULL;
+ }
+ }
+ if (!is_write) {
+ mov_ir(code, size == SZ_B ? 0xFF : 0xFFFF, opts->scratch1, size);
+ }
+ retn(code);
+ return start;
+}
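
gen_mem_fun is now the shared generator for the CPU cores' memory access stubs: given a memory map it emits a routine that charges bus cycles, masks the address and dispatches to the matching chunk, expecting the address in scratch1 for reads or scratch2 for writes. A rough sketch of driving it directly is shown below, with the chunk field order taken from the z80_map initializer added to blastem.c; the ram buffer, example_map and build_byte_handlers are assumptions for illustration, and the cpu_options struct is assumed to be initialized by the owning core.

#include "backend.h"

static uint8_t ram[0x2000];

static const memmap_chunk example_map[] = {
	//start,  end,    mask,   ptr_index, flags,                  buffer, r16/w16/r8/w8 handlers
	{ 0x0000, 0x2000, 0x1FFF, 0,         MMAP_READ | MMAP_WRITE, ram,    NULL, NULL, NULL, NULL }
};

void build_byte_handlers(cpu_options *opts, code_ptr *read_8, code_ptr *write_8)
{
	//each call emits one access routine into opts->code and returns its
	//entry point; the routines are called from generated code, not
	//directly from C
	*read_8 = gen_mem_fun(opts, example_map, 1, READ_8, NULL);
	*write_8 = gen_mem_fun(opts, example_map, 1, WRITE_8, NULL);
}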
diff --git a/blastem.c b/blastem.c
index 962ea9b..e41f550 100644
--- a/blastem.c
+++ b/blastem.c
@@ -4,7 +4,7 @@
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "68kinst.h"
-#include "m68k_to_x86.h"
+#include "m68k_core.h"
#include "z80_to_x86.h"
#include "mem.h"
#include "vdp.h"
@@ -33,7 +33,7 @@
#define MAX_SOUND_CYCLES 100000
-uint32_t mclks_per_frame = MCLKS_LINE*LINES_NTSC;
+uint32_t mclk_target = 0;
uint16_t cart[CARTRIDGE_WORDS];
uint16_t ram[RAM_WORDS];
@@ -125,26 +125,18 @@ uint16_t read_dma_value(uint32_t address)
return 0;
}
-//TODO: Make these dependent on the video mode
-//#define VINT_CYCLE ((MCLKS_LINE * 225 + (148 + 40) * 4)/MCLKS_PER_68K)
-#define ZVINT_CYCLE ((MCLKS_LINE * 225 + (148 + 40) * 4)/MCLKS_PER_Z80)
-//#define VINT_CYCLE ((MCLKS_LINE * 226)/MCLKS_PER_68K)
-//#define ZVINT_CYCLE ((MCLKS_LINE * 226)/MCLKS_PER_Z80)
-
void adjust_int_cycle(m68k_context * context, vdp_context * v_context)
{
context->int_cycle = CYCLE_NEVER;
if ((context->status & 0x7) < 6) {
uint32_t next_vint = vdp_next_vint(v_context);
if (next_vint != CYCLE_NEVER) {
- next_vint /= MCLKS_PER_68K;
context->int_cycle = next_vint;
context->int_num = 6;
}
if ((context->status & 0x7) < 4) {
uint32_t next_hint = vdp_next_hint(v_context);
if (next_hint != CYCLE_NEVER) {
- next_hint /= MCLKS_PER_68K;
if (next_hint < context->int_cycle) {
context->int_cycle = next_hint;
context->int_num = 4;
@@ -163,12 +155,6 @@ void adjust_int_cycle(m68k_context * context, vdp_context * v_context)
int break_on_sync = 0;
int save_state = 0;
-uint8_t reset = 1;
-uint8_t need_reset = 0;
-uint8_t busreq = 0;
-uint8_t busack = 0;
-uint32_t busack_cycle = CYCLE_NEVER;
-uint8_t new_busack = 0;
//#define DO_DEBUG_PRINT
#ifdef DO_DEBUG_PRINT
#define dprintf printf
@@ -180,32 +166,22 @@ uint8_t new_busack = 0;
#define Z80_VINT_DURATION 128
-void sync_z80(z80_context * z_context, uint32_t mclks)
+void z80_next_int_pulse(z80_context * z_context)
{
-#ifdef X86_64
- if (z80_enabled && !reset && !busreq) {
genesis_context * gen = z_context->system;
- z_context->sync_cycle = mclks / MCLKS_PER_Z80;
- if (z_context->current_cycle < z_context->sync_cycle) {
- if (need_reset) {
- z80_reset(z_context);
- need_reset = 0;
- }
- uint32_t vint_cycle = vdp_next_vint_z80(gen->vdp) / MCLKS_PER_Z80;
- while (z_context->current_cycle < z_context->sync_cycle) {
- if (z_context->iff1 && z_context->current_cycle < (vint_cycle + Z80_VINT_DURATION)) {
- z_context->int_cycle = vint_cycle < z_context->int_enable_cycle ? z_context->int_enable_cycle : vint_cycle;
- }
- z_context->target_cycle = z_context->sync_cycle < z_context->int_cycle ? z_context->sync_cycle : z_context->int_cycle;
- dprintf("Running Z80 from cycle %d to cycle %d. Native PC: %p\n", z_context->current_cycle, z_context->sync_cycle, z_context->native_pc);
- z80_run(z_context);
- dprintf("Z80 ran to cycle %d\n", z_context->current_cycle);
+ z_context->int_pulse_start = vdp_next_vint_z80(gen->vdp);
+ z_context->int_pulse_end = z_context->int_pulse_start + Z80_VINT_DURATION * MCLKS_PER_Z80;
}
- }
+
+void sync_z80(z80_context * z_context, uint32_t mclks)
+{
+#ifndef NO_Z80
+ if (z80_enabled) {
+ z80_run(z_context, mclks);
} else
#endif
{
- z_context->current_cycle = mclks / MCLKS_PER_Z80;
+ z_context->current_cycle = mclks;
}
}
@@ -232,17 +208,13 @@ m68k_context * sync_components(m68k_context * context, uint32_t address)
genesis_context * gen = context->system;
vdp_context * v_context = gen->vdp;
z80_context * z_context = gen->z80;
- uint32_t mclks = context->current_cycle * MCLKS_PER_68K;
+ uint32_t mclks = context->current_cycle;
sync_z80(z_context, mclks);
- if (mclks >= mclks_per_frame) {
sync_sound(gen, mclks);
- gen->ym->current_cycle -= mclks_per_frame;
- gen->psg->cycles -= mclks_per_frame;
- if (gen->ym->write_cycle != CYCLE_NEVER) {
- gen->ym->write_cycle = gen->ym->write_cycle >= mclks_per_frame/MCLKS_PER_68K ? gen->ym->write_cycle - mclks_per_frame/MCLKS_PER_68K : 0;
- }
- //printf("reached frame end | 68K Cycles: %d, MCLK Cycles: %d\n", context->current_cycle, mclks);
- vdp_run_context(v_context, mclks_per_frame);
+ if (mclks >= mclk_target) {
+ vdp_run_context(v_context, mclk_target);
+ if (vdp_is_frame_over(v_context)) {
+ //printf("reached frame end | MCLK Cycles: %d, Target: %d, VDP cycles: %d\n", mclks, mclk_target, v_context->cycles);
if (!headless) {
break_on_sync |= wait_render_frame(v_context, frame_limit);
@@ -253,32 +225,30 @@ m68k_context * sync_components(m68k_context * context, uint32_t address)
}
}
frame++;
- mclks -= mclks_per_frame;
- vdp_adjust_cycles(v_context, mclks_per_frame);
- io_adjust_cycles(gen->ports, context->current_cycle, mclks_per_frame/MCLKS_PER_68K);
- io_adjust_cycles(gen->ports+1, context->current_cycle, mclks_per_frame/MCLKS_PER_68K);
- io_adjust_cycles(gen->ports+2, context->current_cycle, mclks_per_frame/MCLKS_PER_68K);
- if (busack_cycle != CYCLE_NEVER) {
- if (busack_cycle > mclks_per_frame/MCLKS_PER_68K) {
- busack_cycle -= mclks_per_frame/MCLKS_PER_68K;
- } else {
- busack_cycle = CYCLE_NEVER;
- busack = new_busack;
+ mclks -= mclk_target;
+ vdp_adjust_cycles(v_context, mclk_target);
+ io_adjust_cycles(gen->ports, context->current_cycle, mclk_target);
+ io_adjust_cycles(gen->ports+1, context->current_cycle, mclk_target);
+ io_adjust_cycles(gen->ports+2, context->current_cycle, mclk_target);
+ context->current_cycle -= mclk_target;
+ z80_adjust_cycles(z_context, mclk_target);
+ gen->ym->current_cycle -= mclk_target;
+ gen->psg->cycles -= mclk_target;
+ if (gen->ym->write_cycle != CYCLE_NEVER) {
+ gen->ym->write_cycle = gen->ym->write_cycle >= mclk_target ? gen->ym->write_cycle - mclk_target : 0;
}
+ if (mclks) {
+ vdp_run_context(v_context, mclks);
}
- context->current_cycle -= mclks_per_frame/MCLKS_PER_68K;
- if (z_context->current_cycle >= mclks_per_frame/MCLKS_PER_Z80) {
- z_context->current_cycle -= mclks_per_frame/MCLKS_PER_Z80;
+ mclk_target = vdp_cycles_to_frame_end(v_context);
+ context->sync_cycle = mclk_target;
} else {
- z_context->current_cycle = 0;
- }
- if (mclks) {
vdp_run_context(v_context, mclks);
+ mclk_target = vdp_cycles_to_frame_end(v_context);
}
} else {
//printf("running VDP for %d cycles\n", mclks - v_context->cycles);
vdp_run_context(v_context, mclks);
- sync_sound(gen, mclks);
}
if (context->int_ack) {
vdp_int_ack(v_context, context->int_ack);
@@ -292,9 +262,10 @@ m68k_context * sync_components(m68k_context * context, uint32_t address)
}
if (save_state) {
save_state = 0;
+ //advance Z80 core to the start of an instruction
while (!z_context->pc)
{
- sync_z80(z_context, z_context->current_cycle * MCLKS_PER_Z80 + MCLKS_PER_Z80);
+ sync_z80(z_context, z_context->current_cycle + MCLKS_PER_Z80);
}
save_gst(gen, "savestate.gst", address);
}
@@ -317,32 +288,24 @@ m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_
int blocked;
uint32_t before_cycle = v_context->cycles;
if (vdp_port < 4) {
- gen->bus_busy = 1;
while (vdp_data_port_write(v_context, value) < 0) {
while(v_context->flags & FLAG_DMA_RUN) {
- vdp_run_dma_done(v_context, mclks_per_frame);
- if (v_context->cycles >= mclks_per_frame) {
- context->current_cycle = v_context->cycles / MCLKS_PER_68K;
- if (context->current_cycle * MCLKS_PER_68K < mclks_per_frame) {
- ++context->current_cycle;
- }
+ vdp_run_dma_done(v_context, mclk_target);
+ if (v_context->cycles >= mclk_target) {
+ context->current_cycle = v_context->cycles;
sync_components(context, 0);
}
}
- //context->current_cycle = v_context->cycles / MCLKS_PER_68K;
+ //context->current_cycle = v_context->cycles;
}
} else if(vdp_port < 8) {
- gen->bus_busy = 1;
blocked = vdp_control_port_write(v_context, value);
if (blocked) {
while (blocked) {
while(v_context->flags & FLAG_DMA_RUN) {
- vdp_run_dma_done(v_context, mclks_per_frame);
- if (v_context->cycles >= mclks_per_frame) {
- context->current_cycle = v_context->cycles / MCLKS_PER_68K;
- if (context->current_cycle * MCLKS_PER_68K < mclks_per_frame) {
- ++context->current_cycle;
- }
+ vdp_run_dma_done(v_context, mclk_target);
+ if (v_context->cycles >= mclk_target) {
+ context->current_cycle = v_context->cycles;
sync_components(context, 0);
}
}
@@ -360,21 +323,18 @@ m68k_context * vdp_port_write(uint32_t vdp_port, m68k_context * context, uint16_
exit(1);
}
if (v_context->cycles != before_cycle) {
- //printf("68K paused for %d (%d) cycles at cycle %d (%d) for write\n", v_context->cycles / MCLKS_PER_68K - context->current_cycle, v_context->cycles - before_cycle, context->current_cycle, before_cycle);
- context->current_cycle = v_context->cycles / MCLKS_PER_68K;
+ //printf("68K paused for %d (%d) cycles at cycle %d (%d) for write\n", v_context->cycles - context->current_cycle, v_context->cycles - before_cycle, context->current_cycle, before_cycle);
+ context->current_cycle = v_context->cycles;
+ //Lock the Z80 out of the bus until the VDP access is complete
+ gen->bus_busy = 1;
+ sync_z80(gen->z80, v_context->cycles);
+ gen->bus_busy = 0;
}
} else if (vdp_port < 0x18) {
- sync_sound(gen, context->current_cycle * MCLKS_PER_68K);
psg_write(gen->psg, value);
} else {
//TODO: Implement undocumented test register(s)
}
- if (gen->bus_busy)
- {
- //Lock the Z80 out of the bus until the VDP access is complete
- sync_z80(gen->z80, v_context->cycles);
- gen->bus_busy = 0;
- }
return context;
}
@@ -383,17 +343,18 @@ m68k_context * vdp_port_write_b(uint32_t vdp_port, m68k_context * context, uint8
return vdp_port_write(vdp_port, context, vdp_port < 0x10 ? value | value << 8 : ((vdp_port & 1) ? value : 0));
}
-z80_context * z80_vdp_port_write(uint16_t vdp_port, z80_context * context, uint8_t value) asm("z80_vdp_port_write");
-z80_context * z80_vdp_port_write(uint16_t vdp_port, z80_context * context, uint8_t value)
+void * z80_vdp_port_write(uint32_t vdp_port, void * vcontext, uint8_t value)
{
+ z80_context * context = vcontext;
genesis_context * gen = context->system;
+ vdp_port &= 0xFF;
if (vdp_port & 0xE0) {
printf("machine freeze due to write to Z80 address %X\n", 0x7F00 | vdp_port);
exit(1);
}
if (vdp_port < 0x10) {
//These probably won't currently interact well with the 68K accessing the VDP
- vdp_run_context(gen->vdp, context->current_cycle * MCLKS_PER_Z80);
+ vdp_run_context(gen->vdp, context->current_cycle);
if (vdp_port < 4) {
vdp_data_port_write(gen->vdp, value << 8 | value);
} else if (vdp_port < 8) {
@@ -403,7 +364,7 @@ z80_context * z80_vdp_port_write(uint16_t vdp_port, z80_context * context, uint8
exit(1);
}
} else if (vdp_port < 0x18) {
- sync_sound(gen, context->current_cycle * MCLKS_PER_Z80);
+ sync_sound(gen, context->current_cycle);
psg_write(gen->psg, value);
} else {
vdp_test_port_write(gen->vdp, value);
@@ -438,8 +399,13 @@ uint16_t vdp_port_read(uint32_t vdp_port, m68k_context * context)
value = vdp_test_port_read(v_context);
}
if (v_context->cycles != before_cycle) {
- //printf("68K paused for %d (%d) cycles at cycle %d (%d) for read\n", v_context->cycles / MCLKS_PER_68K - context->current_cycle, v_context->cycles - before_cycle, context->current_cycle, before_cycle);
- context->current_cycle = v_context->cycles / MCLKS_PER_68K;
+ //printf("68K paused for %d (%d) cycles at cycle %d (%d) for read\n", v_context->cycles - context->current_cycle, v_context->cycles - before_cycle, context->current_cycle, before_cycle);
+ context->current_cycle = v_context->cycles;
+ //Lock the Z80 out of the bus until the VDP access is complete
+ genesis_context *gen = context->system;
+ gen->bus_busy = 1;
+ sync_z80(gen->z80, v_context->cycles);
+ gen->bus_busy = 0;
}
return value;
}
@@ -454,30 +420,49 @@ uint8_t vdp_port_read_b(uint32_t vdp_port, m68k_context * context)
}
}
+uint8_t z80_vdp_port_read(uint32_t vdp_port, void * vcontext)
+{
+ z80_context * context = vcontext;
+ if (vdp_port & 0xE0) {
+ printf("machine freeze due to read from Z80 address %X\n", 0x7F00 | vdp_port);
+ exit(1);
+ }
+ genesis_context * gen = context->system;
+ vdp_port &= 0x1F;
+ uint16_t ret;
+ if (vdp_port < 0x10) {
+ //These probably won't currently interact well with the 68K accessing the VDP
+ vdp_run_context(gen->vdp, context->current_cycle);
+ if (vdp_port < 4) {
+ ret = vdp_data_port_read(gen->vdp);
+ } else if (vdp_port < 8) {
+ ret = vdp_control_port_read(gen->vdp);
+ } else {
+ printf("Illegal write to HV Counter port %X\n", vdp_port);
+ exit(1);
+ }
+ } else {
+ //TODO: Figure out the correct value today
+ ret = 0xFFFF;
+ }
+ return vdp_port & 1 ? ret : ret >> 8;
+}
+
uint32_t zram_counter = 0;
-#define Z80_ACK_DELAY 3
-#define Z80_BUSY_DELAY 1//TODO: Find the actual value for this
-#define Z80_REQ_BUSY 1
-#define Z80_REQ_ACK 0
-#define Z80_RES_BUSACK reset
m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value)
{
genesis_context * gen = context->system;
if (location < 0x10000) {
- if (busack_cycle <= context->current_cycle) {
- busack = new_busack;
- busack_cycle = CYCLE_NEVER;
- }
- if (!(busack || reset)) {
+ if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) {
location &= 0x7FFF;
if (location < 0x4000) {
z80_ram[location & 0x1FFF] = value;
-#ifdef X86_64
+#ifndef NO_Z80
z80_handle_code_write(location & 0x1FFF, gen->z80);
#endif
} else if (location < 0x6000) {
- sync_sound(gen, context->current_cycle * MCLKS_PER_68K);
+ sync_sound(gen, context->current_cycle);
if (location & 1) {
ym_data_write(gen->ym, value);
} else if(location & 2) {
@@ -523,22 +508,15 @@ m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value
}
} else {
if (location == 0x1100) {
- if (busack_cycle <= context->current_cycle) {
- busack = new_busack;
- busack_cycle = CYCLE_NEVER;
- }
if (value & 1) {
dputs("bus requesting Z80");
-
- if(!reset && !busreq) {
- sync_z80(gen->z80, context->current_cycle * MCLKS_PER_68K + Z80_ACK_DELAY*MCLKS_PER_Z80);
- busack_cycle = (gen->z80->current_cycle * MCLKS_PER_Z80) / MCLKS_PER_68K;//context->current_cycle + Z80_ACK_DELAY;
- new_busack = Z80_REQ_ACK;
+ if (z80_enabled) {
+ z80_assert_busreq(gen->z80, context->current_cycle);
+ } else {
+ gen->z80->busack = 1;
}
- busreq = 1;
} else {
- sync_z80(gen->z80, context->current_cycle * MCLKS_PER_68K);
- if (busreq) {
+ if (gen->z80->busreq) {
dputs("releasing z80 bus");
#ifdef DO_DEBUG_PRINT
char fname[20];
@@ -547,30 +525,27 @@ m68k_context * io_write(uint32_t location, m68k_context * context, uint8_t value
fwrite(z80_ram, 1, sizeof(z80_ram), f);
fclose(f);
#endif
- busack_cycle = ((gen->z80->current_cycle + Z80_BUSY_DELAY) * MCLKS_PER_Z80) / MCLKS_PER_68K;
- new_busack = Z80_REQ_BUSY;
- busreq = 0;
}
- //busack_cycle = CYCLE_NEVER;
- //busack = Z80_REQ_BUSY;
-
+ if (z80_enabled) {
+ z80_clear_busreq(gen->z80, context->current_cycle);
+ } else {
+ gen->z80->busack = 0;
+ }
}
} else if (location == 0x1200) {
- sync_z80(gen->z80, context->current_cycle * MCLKS_PER_68K);
+ sync_z80(gen->z80, context->current_cycle);
if (value & 1) {
- if (reset && busreq) {
- new_busack = 0;
- busack_cycle = ((gen->z80->current_cycle + Z80_ACK_DELAY) * MCLKS_PER_Z80) / MCLKS_PER_68K;//context->current_cycle + Z80_ACK_DELAY;
- }
- //TODO: Deal with the scenario in which reset is not asserted long enough
- if (reset) {
- need_reset = 1;
- //TODO: Add necessary delay between release of reset and start of execution
- gen->z80->current_cycle = (context->current_cycle * MCLKS_PER_68K) / MCLKS_PER_Z80 + 16;
+ if (z80_enabled) {
+ z80_clear_reset(gen->z80, context->current_cycle);
+ } else {
+ gen->z80->reset = 0;
}
- reset = 0;
} else {
- reset = 1;
+ if (z80_enabled) {
+ z80_assert_reset(gen->z80, context->current_cycle);
+ } else {
+ gen->z80->reset = 1;
+ }
}
}
}
@@ -598,16 +573,12 @@ uint8_t io_read(uint32_t location, m68k_context * context)
uint8_t value;
genesis_context *gen = context->system;
if (location < 0x10000) {
- if (busack_cycle <= context->current_cycle) {
- busack = new_busack;
- busack_cycle = CYCLE_NEVER;
- }
- if (!(busack==Z80_REQ_BUSY || reset)) {
+ if (!z80_enabled || z80_get_busack(gen->z80, context->current_cycle)) {
location &= 0x7FFF;
if (location < 0x4000) {
value = z80_ram[location & 0x1FFF];
} else if (location < 0x6000) {
- sync_sound(gen, context->current_cycle * MCLKS_PER_68K);
+ sync_sound(gen, context->current_cycle);
value = ym_read_status(gen->ym);
} else {
value = 0xFF;
@@ -647,14 +618,10 @@ uint8_t io_read(uint32_t location, m68k_context * context)
}
} else {
if (location == 0x1100) {
- if (busack_cycle <= context->current_cycle) {
- busack = new_busack;
- busack_cycle = CYCLE_NEVER;
- }
- value = Z80_RES_BUSACK || busack;
- dprintf("Byte read of BUSREQ returned %d @ %d (reset: %d, busack: %d, busack_cycle %d)\n", value, context->current_cycle, reset, busack, busack_cycle);
+ value = z80_enabled ? !z80_get_busack(gen->z80, context->current_cycle) : !gen->z80->busack;
+ dprintf("Byte read of BUSREQ returned %d @ %d (reset: %d)\n", value, context->current_cycle, gen->z80->reset);
} else if (location == 0x1200) {
- value = !reset;
+ value = !gen->z80->reset;
} else {
value = 0xFF;
printf("Byte read of unknown IO location: %X\n", location);
@@ -675,11 +642,11 @@ uint16_t io_read_w(uint32_t location, m68k_context * context)
return value;
}
-extern z80_context * z80_write_ym(uint16_t location, z80_context * context, uint8_t value) asm("z80_write_ym");
-z80_context * z80_write_ym(uint16_t location, z80_context * context, uint8_t value)
+void * z80_write_ym(uint32_t location, void * vcontext, uint8_t value)
{
+ z80_context * context = vcontext;
genesis_context * gen = context->system;
- sync_sound(gen, context->current_cycle * MCLKS_PER_Z80);
+ sync_sound(gen, context->current_cycle);
if (location & 1) {
ym_data_write(gen->ym, value);
} else if (location & 2) {
@@ -690,14 +657,77 @@ z80_context * z80_write_ym(uint16_t location, z80_context * context, uint8_t val
return context;
}
-extern uint8_t z80_read_ym(uint16_t location, z80_context * context) asm("z80_read_ym");
-uint8_t z80_read_ym(uint16_t location, z80_context * context)
+uint8_t z80_read_ym(uint32_t location, void * vcontext)
{
+ z80_context * context = vcontext;
genesis_context * gen = context->system;
- sync_sound(gen, context->current_cycle * MCLKS_PER_Z80);
+ sync_sound(gen, context->current_cycle);
return ym_read_status(gen->ym);
}
+uint8_t z80_read_bank(uint32_t location, void * vcontext)
+{
+ z80_context * context = vcontext;
+ genesis_context *gen = context->system;
+ if (gen->bus_busy) {
+ context->current_cycle = context->sync_cycle;
+ }
+ //typical delay from bus arbitration
+ context->current_cycle += 3 * MCLKS_PER_Z80;
+ //TODO: add cycle for an access right after a previous one
+
+ location &= 0x7FFF;
+ if (context->mem_pointers[1]) {
+ return context->mem_pointers[1][location ^ 1];
+ }
+ uint32_t address = context->bank_reg << 15 | location;
+ if (address >= 0xC00000 && address < 0xE00000) {
+ return z80_vdp_port_read(location & 0xFF, context);
+ } else {
+ fprintf(stderr, "Unhandled read by Z80 from address %X through banked memory area (%X)\n", address, context->bank_reg << 15);
+ }
+ return 0;
+}
+
+void *z80_write_bank(uint32_t location, void * vcontext, uint8_t value)
+{
+ z80_context * context = vcontext;
+ genesis_context *gen = context->system;
+ if (gen->bus_busy) {
+ context->current_cycle = context->sync_cycle;
+ }
+ //typical delay from bus arbitration
+ context->current_cycle += 3 * MCLKS_PER_Z80;
+ //TODO: add cycle for an access right after a previous one
+
+ location &= 0x7FFF;
+ uint32_t address = context->bank_reg << 15 | location;
+ if (address >= 0xE00000) {
+ address &= 0xFFFF;
+ ((uint8_t *)ram)[address ^ 1] = value;
+ } else if (address >= 0xC00000) {
+ z80_vdp_port_write(location & 0xFF, context, value);
+ } else {
+ fprintf(stderr, "Unhandled write by Z80 to address %X through banked memory area\n", address);
+ }
+ return context;
+}
+
+void *z80_write_bank_reg(uint32_t location, void * vcontext, uint8_t value)
+{
+ z80_context * context = vcontext;
+
+ context->bank_reg = (context->bank_reg >> 1 | value << 8) & 0x1FF;
+ if (context->bank_reg < 0x80) {
+ genesis_context *gen = context->system;
+ context->mem_pointers[1] = get_native_pointer(context->bank_reg << 15, (void **)gen->m68k->mem_pointers, &gen->m68k->options->gen);
+ } else {
+ context->mem_pointers[1] = NULL;
+ }
+
+ return context;
+}
+
uint16_t read_sram_w(uint32_t address, m68k_context * context)
{
genesis_context * gen = context->system;
@@ -872,7 +902,7 @@ void save_sram()
void init_run_cpu(genesis_context * gen, FILE * address_log, char * statefile, uint8_t * debugger)
{
m68k_context context;
- x86_68k_options opts;
+ m68k_options opts;
gen->m68k = &context;
memmap_chunk memmap[MAX_MAP_CHUNKS];
uint32_t num_chunks;
@@ -966,7 +996,7 @@ void init_run_cpu(genesis_context * gen, FILE * address_log, char * statefile, u
}
atexit(save_sram);
}
- init_x86_68k_opts(&opts, memmap, num_chunks);
+ init_m68k_opts(&opts, memmap, num_chunks, MCLKS_PER_68K);
opts.address_log = address_log;
init_68k_context(&context, opts.gen.native_code_map, &opts);
@@ -974,7 +1004,7 @@ void init_run_cpu(genesis_context * gen, FILE * address_log, char * statefile, u
context.system = gen;
//cartridge ROM
context.mem_pointers[0] = cart;
- context.target_cycle = context.sync_cycle = mclks_per_frame/MCLKS_PER_68K;
+ context.target_cycle = context.sync_cycle = mclk_target;
//work RAM
context.mem_pointers[1] = ram;
//save RAM/map
@@ -994,9 +1024,6 @@ void init_run_cpu(genesis_context * gen, FILE * address_log, char * statefile, u
insert_breakpoint(&context, pc, debugger);
}
adjust_int_cycle(gen->m68k, gen->vdp);
-#ifdef X86_64
- gen->z80->native_pc = z80_get_native_address_trans(gen->z80, gen->z80->pc);
-#endif
start_68k_context(&context, pc);
} else {
if (debugger) {
@@ -1075,6 +1102,15 @@ void detect_region()
}
}
}
+#ifndef NO_Z80
+const memmap_chunk z80_map[] = {
+ { 0x0000, 0x4000, 0x1FFF, 0, MMAP_READ | MMAP_WRITE | MMAP_CODE, z80_ram, NULL, NULL, NULL, NULL },
+ { 0x8000, 0x10000, 0x7FFF, 0, 0, NULL, NULL, NULL, z80_read_bank, z80_write_bank},
+ { 0x4000, 0x6000, 0x0003, 0, 0, NULL, NULL, NULL, z80_read_ym, z80_write_ym},
+ { 0x6000, 0x6100, 0xFFFF, 0, 0, NULL, NULL, NULL, NULL, z80_write_bank_reg},
+ { 0x7F00, 0x8000, 0x00FF, 0, 0, NULL, NULL, NULL, z80_vdp_port_read, z80_vdp_port_write}
+};
+#endif
int main(int argc, char ** argv)
{
@@ -1220,7 +1256,6 @@ int main(int argc, char ** argv)
height = height < 240 ? (width/320) * 240 : height;
uint32_t fps = 60;
if (version_reg & 0x40) {
- mclks_per_frame = MCLKS_LINE * LINES_PAL;
fps = 50;
}
if (!headless) {
@@ -1231,7 +1266,8 @@ int main(int argc, char ** argv)
memset(&gen, 0, sizeof(gen));
gen.master_clock = gen.normal_clock = fps == 60 ? MCLKS_NTSC : MCLKS_PAL;
- init_vdp_context(&v_context);
+ init_vdp_context(&v_context, version_reg & 0x40);
+ mclk_target = vdp_cycles_to_frame_end(&v_context);
ym2612_context y_context;
ym_init(&y_context, render_sample_rate(), gen.master_clock, MCLKS_PER_YM, render_audio_buffer(), ym_log ? YM_OPT_WAVE_LOG : 0);
@@ -1240,16 +1276,15 @@ int main(int argc, char ** argv)
psg_init(&p_context, render_sample_rate(), gen.master_clock, MCLKS_PER_PSG, render_audio_buffer());
z80_context z_context;
- x86_z80_options z_opts;
-#ifdef X86_64
- init_x86_z80_opts(&z_opts);
+#ifndef NO_Z80
+ z80_options z_opts;
+ init_z80_opts(&z_opts, z80_map, 5, MCLKS_PER_Z80);
init_z80_context(&z_context, &z_opts);
+ z80_assert_reset(&z_context, 0);
#endif
z_context.system = &gen;
z_context.mem_pointers[0] = z80_ram;
- z_context.sync_cycle = z_context.target_cycle = mclks_per_frame/MCLKS_PER_Z80;
- z_context.int_cycle = CYCLE_NEVER;
z_context.mem_pointers[1] = z_context.mem_pointers[2] = (uint8_t *)cart;
gen.z80 = &z_context;
@@ -1271,7 +1306,7 @@ int main(int argc, char ** argv)
if (i < 0) {
strcpy(sram_filename + fname_size, ".sram");
}
- set_keybindings();
+ set_keybindings(gen.ports);
init_run_cpu(&gen, address_log, statefile, debuggerfun);
return 0;
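
The Z80 bank register handling above replaces the old inline logic: each write to $6000 shifts one bit (bit 0 of the value) into a 9-bit register, and the result selects which 32KB slice of the 68K address space appears in the Z80's $8000-$FFFF window. A standalone model of that behavior, useful for checking bank values by hand, is sketched below; the helper names are illustrative, not part of the changeset.

#include <stdint.h>
#include <stdio.h>

static uint16_t bank_reg;

//same update as z80_write_bank_reg: shift right, new bit enters at bit 8
static void write_bank_bit(uint8_t value)
{
	bank_reg = (bank_reg >> 1 | (uint16_t)value << 8) & 0x1FF;
}

//68K address corresponding to a Z80 access in the banked window
static uint32_t bank_68k_address(uint16_t z80_address)
{
	return (uint32_t)bank_reg << 15 | (z80_address & 0x7FFF);
}

int main(void)
{
	//select bank $080, i.e. the window starting at 68K address $400000;
	//bits are written least significant first, nine writes total
	uint16_t bank = 0x080;
	for (int i = 0; i < 9; i++)
	{
		write_bank_bit(bank >> i & 1);
	}
	printf("%06X\n", bank_68k_address(0x8000)); //prints 400000
	return 0;
}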
diff --git a/blastem.h b/blastem.h
index 112ebfa..cd75b64 100644
--- a/blastem.h
+++ b/blastem.h
@@ -7,7 +7,7 @@
#define BLASTEM_H_
#include <stdint.h>
-#include "m68k_to_x86.h"
+#include "m68k_core.h"
#include "z80_to_x86.h"
#include "ym2612.h"
#include "vdp.h"
@@ -19,8 +19,6 @@
#define RAM_FLAG_EVEN 0x1000
#define RAM_FLAG_BOTH 0x0000
-#define CYCLE_NEVER 0xFFFFFFFF
-
typedef struct {
m68k_context *m68k;
z80_context *z80;
@@ -42,8 +40,6 @@ extern int headless;
extern int break_on_sync;
extern int save_state;
extern tern_node * config;
-extern uint8_t busreq;
-extern uint8_t reset;
#define CARTRIDGE_WORDS 0x200000
#define RAM_WORDS 32 * 1024
diff --git a/comparetests.py b/comparetests.py
index ce62ae2..3448fd4 100755
--- a/comparetests.py
+++ b/comparetests.py
@@ -18,6 +18,49 @@ for i in range(1, len(argv)):
else:
prefixes.append(argv[i])
+def print_mismatch(path, b, m):
+ blines = b.split('\n')
+ mlines = m.split('\n')
+ if len(blines) != len(mlines):
+ print '-----------------------------'
+ print 'Unknown mismatch in', path
+ print 'blastem output:'
+ print b
+ print 'musashi output:'
+ print m
+ print '-----------------------------'
+ return
+ prevline = ''
+ differences = []
+ flagmismatch = False
+ regmismatch = False
+ for i in xrange(0, len(blines)):
+ if blines[i] != mlines[i]:
+ if prevline == 'XNZVC':
+ differences.append((prevline, prevline))
+ flagmismatch = True
+ else:
+ regmismatch = True
+ differences.append((blines[i], mlines[i]))
+ prevline = blines[i]
+ if flagmismatch and regmismatch:
+ mtype = 'General'
+ elif flagmismatch:
+ mtype = 'Flag'
+ elif regmismatch:
+ mtype = 'Register'
+ else:
+ mtype = 'Unknown'
+ print '-----------------------------'
+ print mtype, 'mismatch in', path
+ for i in xrange(0, 2):
+ print 'musashi' if i else 'blastem', 'output:'
+ for diff in differences:
+ print diff[i]
+ print '-----------------------------'
+
+
+
for path in glob('generated_tests/*/*.bin'):
if path in skip:
continue
@@ -36,13 +79,8 @@ for path in glob('generated_tests/*/*.bin'):
m = subprocess.check_output(['musashi/mustrans', path])
#_,_,b = b.partition('\n')
if b != m:
- print '-----------------------------'
- print 'Mismatch in ' + path
- print 'blastem output:'
- print b
- print 'musashi output:'
- print m
- print '-----------------------------'
+ print_mismatch(path, b, m)
+
else:
print path, 'passed'
except subprocess.CalledProcessError as e:
diff --git a/debug.c b/debug.c
index b3d1aca..fa0e26e 100644
--- a/debug.c
+++ b/debug.c
@@ -82,7 +82,7 @@ void strip_nl(char * buf)
}
}
-#ifdef X86_64
+#ifndef NO_Z80
void zdebugger_print(z80_context * context, char format_char, char * param)
{
@@ -531,19 +531,49 @@ m68k_context * debugger(m68k_context * context, uint32_t address)
debugging = 0;
break;
case 'b':
- param = find_param(input_buf);
- if (!param) {
- fputs("b command requires a parameter\n", stderr);
- break;
+ if (input_buf[1] == 't') {
+ uint32_t stack = context->aregs[7];
+ if (stack >= 0xE00000) {
+ stack &= 0xFFFF;
+ uint8_t non_adr_count = 0;
+ do {
+ uint32_t bt_address = ram[stack/2] << 16 | ram[stack/2+1];
+ bt_address = get_instruction_start(context->native_code_map, bt_address - 2);
+ if (bt_address) {
+ stack += 4;
+ non_adr_count = 0;
+ uint16_t *bt_pc = NULL;
+ if (bt_address < 0x400000) {
+ bt_pc = cart + bt_address/2;
+ } else if(bt_address > 0xE00000) {
+ bt_pc = ram + (bt_address & 0xFFFF)/2;
+ }
+ m68k_decode(bt_pc, &inst, bt_address);
+ m68k_disasm(&inst, input_buf);
+ printf("%X: %s\n", bt_address, input_buf);
+ } else {
+ //non-return address value on stack can be word wide
+ stack += 2;
+ non_adr_count++;
+ }
+ stack &= 0xFFFF;
+ } while (stack && non_adr_count < 6);
+ }
+ } else {
+ param = find_param(input_buf);
+ if (!param) {
+ fputs("b command requires a parameter\n", stderr);
+ break;
+ }
+ value = strtol(param, NULL, 16);
+ insert_breakpoint(context, value, (uint8_t *)debugger);
+ new_bp = malloc(sizeof(bp_def));
+ new_bp->next = breakpoints;
+ new_bp->address = value;
+ new_bp->index = bp_index++;
+ breakpoints = new_bp;
+ printf("68K Breakpoint %d set at %X\n", new_bp->index, value);
}
- value = strtol(param, NULL, 16);
- insert_breakpoint(context, value, (uint8_t *)debugger);
- new_bp = malloc(sizeof(bp_def));
- new_bp->next = breakpoints;
- new_bp->address = value;
- new_bp->index = bp_index++;
- breakpoints = new_bp;
- printf("68K Breakpoint %d set at %X\n", new_bp->index, value);
break;
case 'a':
param = find_param(input_buf);
@@ -602,9 +632,14 @@ m68k_context * debugger(m68k_context * context, uint32_t address)
}
} else if(param[0] == 'c') {
value = context->current_cycle;
- } else if (param[0] == '0' && param[1] == 'x') {
- uint32_t p_addr = strtol(param+2, NULL, 16);
- value = read_dma_value(p_addr/2);
+ } else if ((param[0] == '0' && param[1] == 'x') || param[0] == '$') {
+ uint32_t p_addr = strtol(param+(param[0] == '0' ? 2 : 1), NULL, 16);
+ if ((p_addr & 0xFFFFFF) == 0xC00004) {
+ genesis_context * gen = context->system;
+ value = vdp_hv_counter_read(gen->vdp);
+ } else {
+ value = read_dma_value(p_addr/2);
+ }
} else {
fprintf(stderr, "Unrecognized parameter to p: %s\n", param);
break;
@@ -705,7 +740,7 @@ m68k_context * debugger(m68k_context * context, uint32_t address)
}
break;
}
-#ifdef X86_64
+#ifndef NO_Z80
case 'z': {
genesis_context * gen = context->system;
//Z80 debug commands
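
The new bt command walks the 68K stack in work RAM and treats a 32-bit value as a return address only when the bytes just before it map back to the start of a translated instruction, giving up after six consecutive non-address words. A sketch of just that check, assuming get_instruction_start is visible through m68k_core.h as the usage above suggests; looks_like_return_address is an illustrative name.

#include <stdint.h>
#include "m68k_core.h"

static uint8_t looks_like_return_address(m68k_context *context, uint32_t value)
{
	//a return address points just past the JSR/BSR that pushed it, so
	//map the preceding bytes back to an instruction start; a nonzero
	//result means the value plausibly came from a call
	return get_instruction_start(context->native_code_map, value - 2) != 0;
}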
diff --git a/debug.h b/debug.h
index 2a05f27..1b61efd 100644
--- a/debug.h
+++ b/debug.h
@@ -2,7 +2,7 @@
#define DEBUG_H_
#include <stdint.h>
-#include "m68k_to_x86.h"
+#include "m68k_core.h"
typedef struct disp_def {
struct disp_def * next;
diff --git a/default.cfg b/default.cfg
index 32df104..1b939be 100644
--- a/default.cfg
+++ b/default.cfg
@@ -54,6 +54,13 @@ bindings {
}
}
+io {
+ devices {
+ 1 gamepad6.1
+ 2 gamepad6.2
+ }
+}
+
video {
width 640
vertex_shader default.v.glsl
diff --git a/dis.c b/dis.c
index 8ec5bb2..17dbc56 100644
--- a/dis.c
+++ b/dis.c
@@ -1,14 +1,17 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "68kinst.h"
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
+#include "vos_program_module.h"
+#include "tern.h"
uint8_t visited[(16*1024*1024)/16];
-uint8_t label[(16*1024*1024)/8];
+uint16_t label[(16*1024*1024)/8];
void visit(uint32_t address)
{
@@ -20,7 +23,7 @@ void reference(uint32_t address)
{
address &= 0xFFFFFF;
//printf("referenced: %X\n", address);
- label[address/16] |= 1 << (address % 8);
+ label[address/16] |= 1 << (address % 16);
}
uint8_t is_visited(uint32_t address)
@@ -29,10 +32,40 @@ uint8_t is_visited(uint32_t address)
return visited[address/16] & (1 << ((address / 2) % 8));
}
-uint8_t is_label(uint32_t address)
+uint16_t is_label(uint32_t address)
{
address &= 0xFFFFFF;
- return label[address/16] & (1 << (address % 8));
+ return label[address/16] & (1 << (address % 16));
+}
+
+typedef struct {
+ uint32_t num_labels;
+ uint32_t storage;
+ char *labels[];
+} label_names;
+
+tern_node * add_label(tern_node * head, char * name, uint32_t address)
+{
+ char key[MAX_INT_KEY_SIZE];
+ address &= 0xFFFFFF;
+ reference(address);
+ tern_int_key(address, key);
+ label_names * names = tern_find_ptr(head, key);
+ if (names)
+ {
+ if (names->num_labels == names->storage)
+ {
+ names->storage = names->storage + (names->storage >> 1);
+ names = realloc(names, sizeof(label_names) + names->storage * sizeof(char *));
+ }
+ } else {
+ names = malloc(sizeof(label_names) + 4 * sizeof(char *));
+ names->num_labels = 0;
+ names->storage = 4;
+ head = tern_insert_ptr(head, key, names);
+ }
+ names->labels[names->num_labels++] = strdup(name);
+ return head;
}
typedef struct deferred {
@@ -42,7 +75,7 @@ typedef struct deferred {
deferred * defer(uint32_t address, deferred * next)
{
- if (is_visited(address)) {
+ if (is_visited(address) || address & 1) {
return next;
}
//printf("deferring %X\n", address);
@@ -66,9 +99,18 @@ void check_reference(m68kinst * inst, m68k_op_info * op)
}
}
-uint8_t labels = 0;
-uint8_t addr = 0;
-uint8_t only = 0;
+int label_fun(char *dst, uint32_t address, void * data)
+{
+ tern_node * labels = data;
+ char key[MAX_INT_KEY_SIZE];
+ label_names * names = tern_find_ptr(labels, tern_int_key(address & 0xFFFFFF, key));
+ if (names)
+ {
+ return sprintf(dst, "%s", names->labels[0]);
+ } else {
+ return m68k_default_label_fun(dst, address, NULL);
+ }
+}
int main(int argc, char ** argv)
{
@@ -77,14 +119,10 @@ int main(int argc, char ** argv)
char disbuf[1024];
m68kinst instbuf;
unsigned short * cur;
- FILE * f = fopen(argv[1], "rb");
- fseek(f, 0, SEEK_END);
- filesize = ftell(f);
- fseek(f, 0, SEEK_SET);
- filebuf = malloc(filesize);
- fread(filebuf, 2, filesize/2, f);
- fclose(f);
deferred *def = NULL, *tmpd;
+
+ uint8_t labels = 0, addr = 0, only = 0, vos = 0, reset = 0;
+
for(uint8_t opt = 2; opt < argc; ++opt) {
if (argv[opt][0] == '-') {
FILE * address_log;
@@ -99,6 +137,12 @@ int main(int argc, char ** argv)
case 'o':
only = 1;
break;
+ case 'v':
+ vos = 1;
+ break;
+ case 'r':
+ reset = 1;
+ break;
case 'f':
opt++;
if (opt >= argc) {
@@ -126,29 +170,85 @@ int main(int argc, char ** argv)
reference(address);
}
}
- for(cur = filebuf; cur - filebuf < (filesize/2); ++cur)
+
+ FILE * f = fopen(argv[1], "rb");
+ fseek(f, 0, SEEK_END);
+ filesize = ftell(f);
+ fseek(f, 0, SEEK_SET);
+
+ tern_node * named_labels = NULL;
+ char int_key[MAX_INT_KEY_SIZE];
+ uint32_t address_off, address_end;
+ if (vos)
{
- *cur = (*cur >> 8) | (*cur << 8);
+ vos_program_module header;
+ vos_read_header(f, &header);
+ vos_read_alloc_module_map(f, &header);
+ address_off = header.user_boundary;
+ address_end = address_off + filesize - 0x1000;
+ def = defer(header.main_entry_link.code_address, def);
+ named_labels = add_label(named_labels, "main_entry_link", header.main_entry_link.code_address);
+ for (int i = 0; i < header.n_modules; i++)
+ {
+ if (!reset || header.module_map_entries[i].code_address != header.user_boundary)
+ {
+ def = defer(header.module_map_entries[i].code_address, def);
+ }
+ named_labels = add_label(named_labels, header.module_map_entries[i].name.str, header.module_map_entries[i].code_address);
+ }
+ fseek(f, 0x1000, SEEK_SET);
+ filebuf = malloc(filesize - 0x1000);
+ if (fread(filebuf, 2, (filesize - 0x1000)/2, f) != (filesize - 0x1000)/2)
+ {
+ fprintf(stderr, "Failure while reading file %s\n", argv[1]);
+ }
+ fclose(f);
+ for(cur = filebuf; cur - filebuf < ((filesize - 0x1000)/2); ++cur)
+ {
+ *cur = (*cur >> 8) | (*cur << 8);
+ }
+ if (reset)
+ {
+ def = defer(filebuf[2] << 16 | filebuf[3], def);
+ named_labels = add_label(named_labels, "reset", filebuf[2] << 16 | filebuf[3]);
+ }
+ } else {
+ address_off = 0;
+ address_end = filesize;
+ filebuf = malloc(filesize);
+ if (fread(filebuf, 2, filesize/2, f) != filesize/2)
+ {
+ fprintf(stderr, "Failure while reading file %s\n", argv[1]);
+ }
+ fclose(f);
+ for(cur = filebuf; cur - filebuf < (filesize/2); ++cur)
+ {
+ *cur = (*cur >> 8) | (*cur << 8);
+ }
+ uint32_t start = filebuf[2] << 16 | filebuf[3];
+ uint32_t int_2 = filebuf[0x68/2] << 16 | filebuf[0x6A/2];
+ uint32_t int_4 = filebuf[0x70/2] << 16 | filebuf[0x72/2];
+ uint32_t int_6 = filebuf[0x78/2] << 16 | filebuf[0x7A/2];
+ named_labels = add_label(named_labels, "start", start);
+ named_labels = add_label(named_labels, "int_2", int_2);
+ named_labels = add_label(named_labels, "int_4", int_4);
+ named_labels = add_label(named_labels, "int_6", int_6);
+ if (!def || !only) {
+ def = defer(start, def);
+ def = defer(int_2, def);
+ def = defer(int_4, def);
+ def = defer(int_6, def);
+ }
}
- uint32_t start = filebuf[2] << 16 | filebuf[3], tmp_addr;
- uint32_t int_2 = filebuf[0x68/2] << 16 | filebuf[0x6A/2];
- uint32_t int_4 = filebuf[0x70/2] << 16 | filebuf[0x72/2];
- uint32_t int_6 = filebuf[0x78/2] << 16 | filebuf[0x7A/2];
uint16_t *encoded, *next;
- uint32_t size;
- if (!def || !only) {
- def = defer(start, def);
- def = defer(int_2, def);
- def = defer(int_4, def);
- def = defer(int_6, def);
- }
+ uint32_t size, tmp_addr;
uint32_t address;
while(def) {
do {
encoded = NULL;
address = def->address;
if (!is_visited(address)) {
- encoded = filebuf + address/2;
+ encoded = filebuf + (address - address_off)/2;
}
tmpd = def;
def = def->next;
@@ -158,7 +258,7 @@ int main(int argc, char ** argv)
break;
}
for(;;) {
- if (address > filesize) {
+ if (address > address_end || address < address_off) {
break;
}
visit(address);
@@ -175,7 +275,7 @@ int main(int argc, char ** argv)
if (instbuf.op == M68K_BCC || instbuf.op == M68K_DBCC || instbuf.op == M68K_BSR) {
if (instbuf.op == M68K_BCC && instbuf.extra.cond == COND_TRUE) {
address = instbuf.address + 2 + instbuf.src.params.immed;
- encoded = filebuf + address/2;
+ encoded = filebuf + (address - address_off)/2;
reference(address);
if (is_visited(address)) {
break;
@@ -188,13 +288,13 @@ int main(int argc, char ** argv)
} else if(instbuf.op == M68K_JMP) {
if (instbuf.src.addr_mode == MODE_ABSOLUTE || instbuf.src.addr_mode == MODE_ABSOLUTE_SHORT) {
address = instbuf.src.params.immed;
- encoded = filebuf + address/2;
+ encoded = filebuf + (address - address_off)/2;
if (is_visited(address)) {
break;
}
- } else if (instbuf.src.addr_mode = MODE_PC_DISPLACE) {
+ } else if (instbuf.src.addr_mode == MODE_PC_DISPLACE) {
address = instbuf.src.params.regs.displacement + instbuf.address + 2;
- encoded = filebuf + address/2;
+ encoded = filebuf + (address - address_off)/2;
if (is_visited(address)) {
break;
}
@@ -211,6 +311,11 @@ int main(int argc, char ** argv)
}
}
if (labels) {
+ for (address = 0; address < address_off; address++) {
+ if (is_label(address)) {
+ printf("ADR_%X equ $%X\n", address, address);
+ }
+ }
for (address = filesize; address < (16*1024*1024); address++) {
if (is_label(address)) {
printf("ADR_%X equ $%X\n", address, address);
@@ -218,25 +323,21 @@ int main(int argc, char ** argv)
}
puts("");
}
- for (address = 0; address < filesize; address+=2) {
+ for (address = address_off; address < address_end; address+=2) {
if (is_visited(address)) {
- encoded = filebuf + address/2;
+ encoded = filebuf + (address-address_off)/2;
m68k_decode(encoded, &instbuf, address);
if (labels) {
- m68k_disasm_labels(&instbuf, disbuf);
- if (address == start) {
- puts("start:");
- }
- if(address == int_2) {
- puts("int_2:");
- }
- if(address == int_4) {
- puts("int_4:");
- }
- if(address == int_6) {
- puts("int_6:");
- }
- if (is_label(instbuf.address)) {
+ m68k_disasm_labels(&instbuf, disbuf, label_fun, named_labels);
+ char keybuf[MAX_INT_KEY_SIZE];
+ label_names * names = tern_find_ptr(named_labels, tern_int_key(address, keybuf));
+ if (names)
+ {
+ for (int i = 0; i < names->num_labels; i++)
+ {
+ printf("%s:\n", names->labels[i]);
+ }
+ } else if (is_label(instbuf.address)) {
printf("ADR_%X:\n", instbuf.address);
}
if (addr) {
diff --git a/gdb_remote.c b/gdb_remote.c
index 11070f8..dd6a98a 100644
--- a/gdb_remote.c
+++ b/gdb_remote.c
@@ -145,7 +145,7 @@ void write_byte(m68k_context * context, uint32_t address, uint8_t value)
} else if (address >= 0xA00000 && address < 0xA04000) {
z80_ram[address & 0x1FFF] = value;
genesis_context * gen = context->system;
-#ifdef X86_64
+#ifndef NO_Z80
z80_handle_code_write(address & 0x1FFF, gen->z80);
#endif
return;
diff --git a/gen.c b/gen.c
new file mode 100644
index 0000000..8f1b5f5
--- /dev/null
+++ b/gen.c
@@ -0,0 +1,15 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "gen.h"
+#include "mem.h"
+
+void init_code_info(code_info *code)
+{
+ size_t size = CODE_ALLOC_SIZE;
+ code->cur = alloc_code(&size);
+ if (!code->cur) {
+ fputs("Failed to allocate memory for generated code\n", stderr);
+ exit(1);
+ }
+ code->last = code->cur + size/sizeof(code_word) - RESERVE_WORDS;
+}
diff --git a/gen.h b/gen.h
new file mode 100644
index 0000000..5087b57
--- /dev/null
+++ b/gen.h
@@ -0,0 +1,33 @@
+#ifndef GEN_H_
+#define GEN_H_
+#include <stdint.h>
+
+#if defined(X86_64) || defined(X86_32)
+typedef uint8_t code_word;
+#define RESERVE_WORDS 5 //opcode + 4-byte displacement
+#else
+typedef uint32_t code_word;
+#define RESERVE_WORDS 4 //1 push + 1 ldr + 1bx + 1 constant
+#endif
+typedef code_word * code_ptr;
+#define CODE_ALLOC_SIZE (1024*1024)
+
+typedef struct {
+ code_ptr cur;
+ code_ptr last;
+} code_info;
+
+void check_alloc_code(code_info *code, uint32_t inst_size);
+
+void init_code_info(code_info *code);
+void call(code_info *code, code_ptr fun);
+void jmp(code_info *code, code_ptr dest);
+void jmp_r(code_info *code, uint8_t dst);
+//call a function and put the arguments in the appropriate place according to the host ABI
+void call_args(code_info *code, code_ptr fun, uint32_t num_args, ...);
+//like the above, but follows other aspects of the ABI like stack alignment
+void call_args_abi(code_info *code, code_ptr fun, uint32_t num_args, ...);
+void save_callee_save_regs(code_info *code);
+void restore_callee_save_regs(code_info *code);
+
+#endif //GEN_H_
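
For illustration only, a hypothetical emitter written against this interface (emit_nop is not part of the patch); it shows the protocol every generator in gen_x86.c below follows: reserve space with check_alloc_code, write through code->cur, then store the advanced cursor back.

#include "gen.h"

/* Illustrative only: emit a single x86 NOP (0x90) using the same
   reserve-then-write pattern as the real emitters. */
static void emit_nop(code_info *code)
{
	check_alloc_code(code, 1);
	code_ptr out = code->cur;
	*(out++) = 0x90;
	code->cur = out;
}
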
diff --git a/gen_arm.c b/gen_arm.c
index 2693fc3..5d9fab6 100644
--- a/gen_arm.c
+++ b/gen_arm.c
@@ -57,10 +57,6 @@
#define REG 0u
-
-#define RESERVE_INSTRUCTIONS 4 //1 ldr + 1bx + 1 constant
-#define CODE_ALLOC_SIZE (1024*1024)
-
uint32_t make_immed(uint32_t val)
{
uint32_t rot_amount = 0;
@@ -74,17 +70,6 @@ uint32_t make_immed(uint32_t val)
return INVALID_IMMED;
}
-void init_code_info(code_info *code)
-{
- size_t size = CODE_ALLOC_SIZE;
- code->cur = alloc_code(&size);
- if (!code->cur) {
- fputs("Failed to allocate memory for generated code\n", stderr);
- exit(1);
- }
- code->last = code->cur + size/sizeof(uint32_t) - RESERVE_INSTRUCTIONS;
-}
-
void check_alloc_code(code_info *code)
{
if (code->cur == code->last) {
@@ -94,9 +79,9 @@ void check_alloc_code(code_info *code)
fputs("Failed to allocate memory for generated code\n", stderr);
exit(1);
}
- if (next_code = code->last + RESERVE_INSTRUCTIONS) {
+	if (next_code == code->last + RESERVE_WORDS) {
//new chunk is contiguous with the current one
- code->last = next_code + size/sizeof(uint32_t) - RESERVE_INSTRUCTIONS;
+ code->last = next_code + size/sizeof(code_word) - RESERVE_WORDS;
} else {
uint32_t * from = code->cur + 2;
if (next_code - from < 0x400000 || from - next_code <= 0x400000) {
@@ -115,7 +100,7 @@ void check_alloc_code(code_info *code)
}
//branch to address in r0
*from = CC_AL | OP_BX;
- code->last = next_code + size/sizeof(uint32_t) - RESERVE_INSTRUCTIONS;
+ code->last = next_code + size/sizeof(code_word) - RESERVE_WORDS;
//pop r0
*(next_code++) = CC_AL | POP;
code->cur = next_code;
diff --git a/gen_arm.h b/gen_arm.h
index cd351fc..749f78c 100644
--- a/gen_arm.h
+++ b/gen_arm.h
@@ -7,11 +7,7 @@
#define GEN_ARM_H_
#include <stdint.h>
-
-typedef struct {
- uint32_t *cur;
- uint32_t *last;
-} code_info;
+#include "gen.h"
#define SET_COND 0x100000u
#define NO_COND 0u
@@ -73,8 +69,6 @@ enum {
#define LR 0x4000
#define PC 0x8000
-void init_code_info(code_info *code);
-
uint32_t and(code_info *code, uint32_t dst, uint32_t src1, uint32_t src2, uint32_t set_cond);
uint32_t andi(code_info *code, uint32_t dst, uint32_t src1, uint32_t immed, uint32_t set_cond);
uint32_t and_cc(code_info *code, uint32_t dst, uint32_t src1, uint32_t src2, uint32_t cc, uint32_t set_cond);
diff --git a/gen_test_hv.s68 b/gen_test_hv.s68
new file mode 100644
index 0000000..0ee0c22
--- /dev/null
+++ b/gen_test_hv.s68
@@ -0,0 +1,631 @@
+ dc.l $0, start
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$10
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$20
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$30
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$40
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$50
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$60
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$70
+ dc.l int_4
+ dc.l empty_handler
+ dc.l int_6
+ dc.l empty_handler
+ ;$80
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$90
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$A0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$B0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$C0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$D0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$E0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ ;$F0
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.l empty_handler
+ dc.b "SEGA"
+empty_handler:
+int_6:
+ rte
+int_4:
+ move.w (a2), d0
+ ori.w #$8000, d0
+ move.w d0, (a4)+
+ rte
+
+start:
+ lea $C00000, a0
+ lea $C00004, a1
+ move.w #$8104, (a1) ;Mode 5, everything turned off
+ move.w #$8004, (a1)
+ move.w #$8220, (a1) ;Scroll a table $8000
+ move.w #$8404, (a1) ;Scroll b table $8000
+ move.w #$8560, (a1) ;SAT table $C000
+ move.w #$8700, (a1) ;backdrop color 0
+ move.w #$8B00, (a1) ;full screen scroll
+ move.w #$8C81, (a1) ;40 cell mode, no interlace
+ move.w #$8C81, (mode).w
+ move.w #$8D00, (a1) ;hscroll table at 0
+ move.w #$8F02, (a1) ;autoinc 2
+ move.w #$9011, (a1) ;64x64 scroll size
+ move.l #$C0000000, (a1)
+ move.w #$000, (a0)
+ move.w #$EEE, (a0)
+
+ ;clear scroll table
+ move.l #$40000000, (a1)
+ move.l #0, (a0)
+
+ ;load tiles
+ move.l #$44000000, (a1)
+ lea font(pc), a2
+ move.w #((fontend-font)/4 - 1), d0
+tloop:
+ move.l (a2)+, (a0)
+ dbra d0, tloop
+
+
+
+ ;clear name table
+ move.l #$40000002, (a1)
+ moveq #32, d0
+ move.w #(64*64-1), d1
+ploop:
+ move.w d0, (a0)
+ dbra d1, ploop
+
+
+ lea $FF0000, a4
+ move.b #$40, (a4, 6)
+ move.w #$8144, (a1) ;enable display
+ move #$2300, sr
+
+ lea (4, a1), a2 ;hv counter line address
+	lea (2, a1), a3 ;second control/status address
+
+ move.b #254, d0
+init_wait:
+ cmp.b (a2), d0
+ beq init_wait
+
+top:
+ move.b #254, d0
+ lea $FF0000, a4
+ move.w #$8F00, (a1) ;autoinc of 0
+ move.l #$40040000, (a1) ;unused VRAM address
+wait_active:
+ cmp.b (a2), d0
+ bne.s wait_active
+
+ move.l #$8A718014, (a1) ;enable Hints
+
+ ;sync to VDP by attempting to fill FIFO
+ ;being in vblank makes this a bit difficult
+
+ rept 8
+ move.l d0, (a0)
+ endr
+
+ ;sample data for vblank flag off
+ rept 82 ;two lines worth of move.l
+ move.l (a3), (a4)+
+ endr
+
+ move.l a4, a5 ;save end of first buffer
+
+ move.b (a2), d0
+wait_new_line:
+ cmp.b (a2), d0
+ beq.s wait_new_line
+
+ ;sync to VDP by filling FIFO
+ move.l d0, (a0)
+ move.l d0, (a0)
+ move.w d0, (a0)
+
+ ;sample data for line change HV value
+ rept 45 ;one line worth of move.l
+ move.l (a2), (a4)+
+ endr
+
+ move.l a4, usp ;save end of second buffer
+
+ moveq #$70, d0
+wait_hint_line:
+ cmp.b (a2), d0
+ bne.s wait_hint_line
+
+ ;sample data for line change HV value
+ rept 45 ;one line worth of move.l
+ move.l (a2), (a4)+
+ endr
+
+ move.l a4, a6
+
+ move.b #223, d0
+wait_inactive:
+ cmp.b (a2), d0
+ bne.s wait_inactive
+
+ ;sync to VDP by filling FIFO
+ move.l d0, (a0)
+ move.l d0, (a0)
+ move.w d0, (a0)
+
+ ;sample data for vblank on
+ rept 82 ;two lines worth of move.l
+ move.l (a3), (a4)+
+ endr
+
+ move.l #$8AFF8004, (a1) ;disable Hints
+
+ rsset $FFFF8000
+vblank_start_min rs.w 1
+vblank_start_max rs.w 1
+vblank_end_min rs.w 1
+vblank_end_max rs.w 1
+hblank_start_min rs.w 1
+hblank_start_max rs.w 1
+hblank_end_min rs.w 1
+hblank_end_max rs.w 1
+line_change_min rs.w 1
+line_change_max rs.w 1
+hint_min rs.w 1
+hint_max rs.w 1
+mode rs.w 1
+printed_hv_dump rs.b 1
+button_state rs.b 1
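
The sampling code below shares one min/max idiom: a stored value of zero means "not recorded yet"; otherwise a sample at least as large replaces the max and a sample at least as small replaces the min. Roughly, in C (illustrative, not part of the test ROM):

/* Equivalent of the repeated .new_max/.new_min blocks that follow */
static void update_range(unsigned short sample,
                         unsigned short *min, unsigned short *max)
{
	if (*max == 0 || sample >= *max) {
		*max = sample;
	}
	if (*min == 0 || sample <= *min) {
		*min = sample;
	}
}
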
+
+ lea $FF0001, a4
+.loop:
+ btst.b #3, (a4)
+ beq.s found_vblank_off
+ move.w 1(a4), d6
+ addq #4, a4
+ bra.s .loop
+found_vblank_off:
+
+ move.w (vblank_end_max).w, d0
+ beq .new_max
+ cmp.w d0, d6
+ blo .no_new_max
+.new_max
+ move.w d6, (vblank_end_max).w
+.no_new_max:
+
+
+ move.w 1(a4), d6
+
+ move.w (vblank_end_min).w, d0
+ beq .new_min
+ cmp.w d0, d6
+ bhi .no_new_min
+.new_min
+ move.w d6, (vblank_end_min).w
+.no_new_min:
+
+ lea $FF0001, a4
+;first find a point where HBLANK is not set
+ bra.s .start
+.loop:
+ addq #4, a4
+.start
+ btst.b #2, (a4)
+ bne.s .loop
+
+;then find a point after that where it switches to on
+.loop2:
+ btst.b #2, (a4)
+ bne.s found_hblank_on
+ move.w 1(a4), d5
+ addq #4, a4
+ bra.s .loop2
+found_hblank_on:
+
+ move.w (hblank_start_max).w, d0
+ beq .new_max
+ cmp.w d0, d5
+ blo .no_new_max
+.new_max
+ move.w d5, (hblank_start_max).w
+.no_new_max:
+
+
+ move.w 1(a4), d5
+
+ move.w (hblank_start_min).w, d0
+ beq .new_min
+ cmp.w d0, d5
+ bhi .no_new_min
+.new_min
+ move.w d5, (hblank_start_min).w
+.no_new_min:
+
+;finally find a point after that where it switches back off
+.loop2:
+ btst.b #2, (a4)
+ beq.s found_hblank_off
+ move.w 1(a4), d5
+ addq #4, a4
+ bra.s .loop2
+found_hblank_off:
+
+ move.w (hblank_end_max).w, d0
+ beq .new_max
+ cmp.w d0, d5
+ blo .no_new_max
+.new_max
+ move.w d5, (hblank_end_max).w
+.no_new_max:
+
+
+ move.w 1(a4), d5
+
+ move.w (hblank_end_min).w, d0
+ beq .new_min
+ cmp.w d0, d5
+ bhi .no_new_min
+.new_min
+ move.w d5, (hblank_end_min).w
+.no_new_min:
+
+ move.l a5, a4 ;save line change buffer for later
+ move.b (a5), d0
+.loop
+ move.w (a5), d7
+ addq #2, a5
+ cmp.b (a5), d0
+ beq .loop
+found_line_change:
+
+ move.w (line_change_max).w, d0
+ beq .new_max
+ cmp.w d0, d7
+ blo .no_new_max
+.new_max
+ move.w d7, (line_change_max).w
+.no_new_max:
+
+ move.w (a5), d7
+
+ move.w (line_change_min).w, d0
+ beq .new_min
+ cmp.w d0, d7
+ bhi .no_new_min
+.new_min
+ move.w d7, (line_change_min).w
+.no_new_min:
+
+ addq #1, a6
+.loop:
+ btst.b #3, (a6)
+ bne.s found_vblank_on
+ move.w 1(a6), d5
+ addq #4, a6
+ bra.s .loop
+found_vblank_on:
+
+ move.w (vblank_start_max).w, d0
+ beq .new_max
+ cmp.w d0, d5
+ blo .no_new_max
+.new_max
+ move.w d5, (vblank_start_max).w
+.no_new_max:
+
+ move.w 1(a6), d5
+
+ move.w (vblank_start_min).w, d0
+ beq .new_min
+ cmp.b d0, d5
+ bhi .no_new_min
+.new_min
+ move.w d5, (vblank_start_min).w
+.no_new_min:
+
+ move usp, a5
+.loop:
+ btst.b #7, (a5)
+ bne.s found_hint
+ move.w (a5), d1
+ addq #2, a5
+ bra.s .loop
+found_hint:
+
+ move.w (hint_max).w, d0
+ beq .new_max
+ cmp.w d0, d1
+ blo .no_new_max
+.new_max
+ move.w d1, (hint_max).w
+.no_new_max:
+
+ move.w (a5), d1
+ and.w #$7FFF, d1
+
+ move.w (hint_min).w, d0
+ beq .new_min
+ cmp.b d0, d1
+ bhi .no_new_min
+.new_min
+ move.w d1, (hint_min).w
+.no_new_min:
+
+draw_info:
+ ;draw data
+ move.w #$8F02, (a1) ;autoinc of 2
+ move.l #$40840002, (a1)
+
+ moveq #0, d0
+ lea VBlankStart(pc), a6
+ bsr print_string
+
+
+ move.w (vblank_start_max), d0
+ moveq #0, d1
+ bsr print_hexw
+
+ move.w #32, (a0)
+ move.w d5, d0
+ bsr print_hexw
+
+ move.w #32, (a0)
+ move.w (vblank_start_min), d0
+ bsr print_hexw
+
+ moveq #0, d0
+ move.l #$41040002, (a1)
+ lea VBlankEnd(pc), a6
+ bsr print_string
+
+ ;max value before vblank end
+ moveq #0, d1
+ move.w (vblank_end_max), d0
+ bsr print_hexw
+
+ move.w #32, (a0)
+ move.w d6, d0
+ bsr print_hexw
+
+ ;min value after vblank end
+ move.w (vblank_end_min), d0
+ move.w #32, (a0)
+ bsr print_hexw
+
+ moveq #0, d0
+ move.l #$41840002, (a1)
+ lea LineChange(pc), a6
+ bsr print_string
+
+ move.w (line_change_max), d0
+ moveq #0, d1
+ bsr print_hexw
+
+ move.w #32, (a0)
+ move.w d7, d0
+ bsr print_hexw
+
+ move.w (line_change_min), d0
+ move.w #32, (a0)
+ bsr print_hexw
+
+ moveq #0, d0
+ move.l #$42040002, (a1)
+ lea HBlankStart(pc), a6
+ bsr print_string
+
+ move.w (hblank_start_max), d0
+ moveq #0, d1
+ bsr print_hexw
+
+ move.w (hblank_start_min), d0
+ move.w #32, (a0)
+ bsr print_hexw
+
+ moveq #0, d0
+ move.l #$42840002, (a1)
+ lea HBlankEnd(pc), a6
+ bsr print_string
+
+ move.w (hblank_end_max), d0
+ moveq #0, d1
+ bsr print_hexw
+
+ move.w (hblank_end_min), d0
+ move.w #32, (a0)
+ bsr print_hexw
+
+ moveq #0, d0
+ move.l #$43040002, (a1)
+ lea HInterrupt(pc), a6
+ bsr print_string
+
+ move.w (hint_max), d0
+ moveq #0, d1
+ bsr print_hexw
+
+ move.w (hint_min), d0
+ move.w #32, (a0)
+ bsr print_hexw
+
+ ;read pad
+ move.b #$40, $A10003
+ move.b $A10003, d0
+ move.b #$00, $A10003
+ and.b #$3f, d0
+ move.b $A10003, d1
+ and.b #$30, d1
+ lsl.b #2, d1
+ or.b d1, d0
+ not.b d0
+ move.b (button_state).w, d2
+ eor.b d0, d2
+ and.b d0, d2
+ move.b d2, d3 ;d3 contains newly pressed buttons, SACBRLDU
+ move.b d0, (button_state).w
+
+ btst.l #7, d3
+ beq not_pressed
+
+ moveq #0, d0
+ move.l d0, (vblank_start_min).w
+ move.l d0, (vblank_end_min).w
+ move.l d0, (hblank_start_min).w
+ move.l d0, (hblank_end_min).w
+ move.l d0, (line_change_min).w
+ move.l d0, (hint_min).w
+ move.b d0, (printed_hv_dump).w
+ move.w (mode).w, d0
+ eor.w #$81, d0
+ move.w d0, (mode).w
+ move.w d0, (a1)
+ bra top
+
+not_pressed
+
+ move.b (printed_hv_dump).w, d0
+ bne top
+ move.b #1, (printed_hv_dump).w
+
+ moveq #0, d1
+ moveq #89, d4
+ moveq #6, d5
+ move.l #$45820002, d6
+ move.l d6, (a1)
+
+print_loop:
+ dbra d5, .no_line_change
+ ;#$45820002
+ add.l #$00800000, d6
+ move.l d6, (a1)
+ moveq #5, d5
+.no_line_change
+ move.w #32, (a0)
+ move.w (a4)+, d0
+ bsr print_hexw
+ dbra d4, print_loop
+
+ add.l #$01020000, d6
+ move.l d6, (a1)
+ moveq #0, d0
+ lea Instructions(pc), a6
+ bsr print_string
+
+ bra top
+
+VBlankStart:
+ dc.b "VBlank Start: ", 0
+VBlankEnd:
+ dc.b "VBlank End: ", 0
+LineChange:
+ dc.b "Line Change: ", 0
+HBlankStart:
+ dc.b "HBlank Start: ", 0
+HBlankEnd:
+ dc.b "HBlank End: ", 0
+HInterrupt:
+ dc.b "HInterrupt: ", 0
+Instructions:
+ dc.b "Press Start to switch modes", 0
+
+ align 1
+;Prints a number in hex format
+;d0.w - number to print
+;d1.w - base tile attribute
+;a0 - VDP data port
+;
+;Clobbers: d2.l, d3.l
+;
+print_hexw:
+ moveq #3, d3
+.digitloop
+ rol.w #4, d0
+ moveq #$F, d2
+ and.b d0, d2
+ cmp.b #$A, d2
+ bge .hex
+ add.w #$30, d2
+ bra .makeattrib
+.hex
+ add.w #($41-$A), d2
+.makeattrib
+ add.w d1, d2
+ move.w d2, (a0)
+ dbra d3, .digitloop
+ rts
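
The same conversion in C, for reference (illustrative only; the ROM writes each resulting tile straight to the VDP data port): rotate the next nibble into the low four bits, map 0-9 to $30-$39 and A-F to $41-$46, then add the base tile attribute.

static void print_hexw_c(unsigned short value, unsigned short base,
                         void (*emit_tile)(unsigned short))
{
	for (int i = 0; i < 4; i++) {
		value = (unsigned short)((value << 4) | (value >> 12)); /* rol.w #4 */
		unsigned short digit = value & 0xF;
		digit += digit < 0xA ? 0x30 : 0x41 - 0xA;
		emit_tile((unsigned short)(digit + base));
	}
}
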
+
+;Prints a null terminated string
+;a6 - pointer to string
+;a0 - VDP data port
+;d0 - base tile attribute
+;
+;Clobbers: d1.w
+print_string:
+.loop
+ moveq #0, d1
+ move.b (a6)+, d1
+ beq .end
+ add.w d0, d1
+ move.w d1, (a0)
+ bra .loop
+.end
+ rts
+
+ align 1
+font:
+ incbin font.tiles
+fontend
+
diff --git a/gen_x86.c b/gen_x86.c
index 2ec4c00..cbae264 100644
--- a/gen_x86.c
+++ b/gen_x86.c
@@ -4,10 +4,12 @@
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "gen_x86.h"
-#include "68kinst.h"
+#include "mem.h"
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
#define REX_RM_FIELD 0x1
#define REX_SIB_FIELD 0x2
@@ -33,6 +35,7 @@
#define OP_TEST 0x84
#define OP_XCHG 0x86
#define OP_MOV 0x88
+#define PRE_XOP 0x8F
#define OP_XCHG_AX 0x90
#define OP_CDQ 0x99
#define OP_PUSHF 0x9C
@@ -127,8 +130,55 @@ enum {
X86_R15
} x86_regs_enc;
-uint8_t * x86_rr_sizedir(uint8_t * out, uint16_t opcode, uint8_t src, uint8_t dst, uint8_t size)
+void jmp_nocheck(code_info *code, code_ptr dest)
{
+ code_ptr out = code->cur;
+ ptrdiff_t disp = dest-(out+2);
+ if (disp <= 0x7F && disp >= -0x80) {
+ *(out++) = OP_JMP_BYTE;
+ *(out++) = disp;
+ } else {
+ disp = dest-(out+5);
+ if (disp <= 0x7FFFFFFF && disp >= -2147483648) {
+ *(out++) = OP_JMP;
+ *(out++) = disp;
+ disp >>= 8;
+ *(out++) = disp;
+ disp >>= 8;
+ *(out++) = disp;
+ disp >>= 8;
+ *(out++) = disp;
+ } else {
+ fprintf(stderr, "jmp: %p - %p = %lX\n", dest, out + 6, (long)disp);
+ exit(1);
+ }
+ }
+ code->cur = out;
+}
+
+void check_alloc_code(code_info *code, uint32_t inst_size)
+{
+ if (code->cur + inst_size > code->last) {
+ size_t size = CODE_ALLOC_SIZE;
+ code_ptr next_code = alloc_code(&size);
+ if (!next_code) {
+ fputs("Failed to allocate memory for generated code\n", stderr);
+ exit(1);
+ }
+ if (next_code != code->last + RESERVE_WORDS) {
+ //new chunk is not contiguous with the current one
+ jmp_nocheck(code, next_code);
+			code->cur = next_code;
+ }
+ code->last = next_code + size/sizeof(code_word) - RESERVE_WORDS;
+ }
+}
+
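
When the new chunk is not contiguous, check_alloc_code bridges to it with jmp_nocheck, which picks the smallest jump that reaches the target: 2 bytes (EB disp8) when the displacement fits in a signed byte, otherwise 5 bytes (E9 disp32). A standalone illustration of that size choice (not part of the patch):

#include <stddef.h>
#include <stdint.h>

/* Example: a jump to a target 0x10 bytes past the start of the
   instruction has disp = 0x10 - 2 = 0x0E and encodes as EB 0E. */
static size_t jmp_size(uint8_t *dest, uint8_t *out)
{
	ptrdiff_t disp = dest - (out + 2);
	if (disp >= -0x80 && disp <= 0x7F) {
		return 2; /* EB disp8 */
	}
	return 5; /* E9 disp32 */
}
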
+void x86_rr_sizedir(code_info *code, uint16_t opcode, uint8_t src, uint8_t dst, uint8_t size)
+{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
uint8_t tmp;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
@@ -175,11 +225,13 @@ uint8_t * x86_rr_sizedir(uint8_t * out, uint16_t opcode, uint8_t src, uint8_t ds
*(out++) = opcode;
}
*(out++) = MODE_REG_DIRECT | dst | (src << 3);
- return out;
+ code->cur = out;
}
-uint8_t * x86_rrdisp8_sizedir(uint8_t * out, uint16_t opcode, uint8_t reg, uint8_t base, int8_t disp, uint8_t size, uint8_t dir)
+void x86_rrdisp_sizedir(code_info *code, uint16_t opcode, uint8_t reg, uint8_t base, int32_t disp, uint8_t size, uint8_t dir)
{
+ check_alloc_code(code, 10);
+ code_ptr out = code->cur;
//TODO: Deal with the fact that AH, BH, CH and DH can only be in the R/M param when there's a REX prefix
uint8_t tmp;
if (size == SZ_W) {
@@ -218,69 +270,28 @@ uint8_t * x86_rrdisp8_sizedir(uint8_t * out, uint16_t opcode, uint8_t reg, uint8
} else {
*(out++) = opcode;
}
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | base | (reg << 3);
- if (base == RSP) {
- //add SIB byte, with no index and RSP as base
- *(out++) = (RSP << 3) | RSP;
- }
- *(out++) = disp;
- return out;
-}
-
-uint8_t * x86_rrdisp32_sizedir(uint8_t * out, uint16_t opcode, uint8_t reg, uint8_t base, int32_t disp, uint8_t size, uint8_t dir)
-{
- //TODO: Deal with the fact that AH, BH, CH and DH can only be in the R/M param when there's a REX prefix
- uint8_t tmp;
- if (size == SZ_W) {
- *(out++) = PRE_SIZE;
- }
- if (size == SZ_Q || reg >= R8 || base >= R8 || (size == SZ_B && reg >= RSP && reg <= RDI)) {
- *out = PRE_REX;
- if (reg >= AH && reg <= BH) {
- fprintf(stderr, "attempt to use *H reg in an instruction requiring REX prefix. opcode = %X\n", opcode);
- exit(1);
- }
- if (size == SZ_Q) {
- *out |= REX_QUAD;
- }
- if (reg >= R8) {
- *out |= REX_REG_FIELD;
- reg -= (R8 - X86_R8);
- }
- if (base >= R8) {
- *out |= REX_RM_FIELD;
- base -= (R8 - X86_R8);
- }
- out++;
- }
- if (size == SZ_B) {
- if (reg >= AH && reg <= BH) {
- reg -= (AH-X86_AH);
- }
- } else {
- opcode |= BIT_SIZE;
- }
- opcode |= dir;
- if (opcode >= 0x100) {
- *(out++) = opcode >> 8;
- *(out++) = opcode;
} else {
- *(out++) = opcode;
+ *(out++) = MODE_REG_DISPLACE32 | base | (reg << 3);
}
- *(out++) = MODE_REG_DISPLACE32 | base | (reg << 3);
if (base == RSP) {
//add SIB byte, with no index and RSP as base
*(out++) = (RSP << 3) | RSP;
}
*(out++) = disp;
+ if (disp >= 128 || disp < -128) {
*(out++) = disp >> 8;
*(out++) = disp >> 16;
*(out++) = disp >> 24;
- return out;
+ }
+ code->cur = out;
}
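
With the 8- and 32-bit displacement variants merged, callers pass any displacement and the emitter picks the short ModRM form when it fits. A usage sketch (assuming the register and size enums from gen_x86.h; the byte sequences are the standard x86 encodings, not something new in this patch):

#include "gen_x86.h"

static void disp_example(code_info *code)
{
	mov_rrdisp(code, RAX, RBP, 0x08, SZ_D);  /* 89 45 08 */
	mov_rrdisp(code, RAX, RBP, 0x100, SZ_D); /* 89 85 00 01 00 00 */
}
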
-uint8_t * x86_rrind_sizedir(uint8_t * out, uint8_t opcode, uint8_t reg, uint8_t base, uint8_t size, uint8_t dir)
+void x86_rrind_sizedir(code_info *code, uint8_t opcode, uint8_t reg, uint8_t base, uint8_t size, uint8_t dir)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
//TODO: Deal with the fact that AH, BH, CH and DH can only be in the R/M param when there's a REX prefix
uint8_t tmp;
if (size == SZ_W) {
@@ -313,16 +324,25 @@ uint8_t * x86_rrind_sizedir(uint8_t * out, uint8_t opcode, uint8_t reg, uint8_t
opcode |= BIT_SIZE;
}
*(out++) = opcode | dir;
+ if (base == RBP) {
+ //add a dummy 8-bit displacement since MODE_REG_INDIRECT with
+		//an R/M field of RBP selects RIP-relative addressing
+ *(out++) = MODE_REG_DISPLACE8 | base | (reg << 3);
+ *(out++) = 0;
+ } else {
*(out++) = MODE_REG_INDIRECT | base | (reg << 3);
if (base == RSP) {
//add SIB byte, with no index and RSP as base
*(out++) = (RSP << 3) | RSP;
}
- return out;
+ }
+ code->cur = out;
}
-uint8_t * x86_rrindex_sizedir(uint8_t * out, uint8_t opcode, uint8_t reg, uint8_t base, uint8_t index, uint8_t scale, uint8_t size, uint8_t dir)
+void x86_rrindex_sizedir(code_info *code, uint8_t opcode, uint8_t reg, uint8_t base, uint8_t index, uint8_t scale, uint8_t size, uint8_t dir)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
//TODO: Deal with the fact that AH, BH, CH and DH can only be in the R/M param when there's a REX prefix
uint8_t tmp;
if (size == SZ_W) {
@@ -361,14 +381,20 @@ uint8_t * x86_rrindex_sizedir(uint8_t * out, uint8_t opcode, uint8_t reg, uint8_
*(out++) = opcode | dir;
*(out++) = MODE_REG_INDIRECT | RSP | (reg << 3);
if (scale == 4) {
- scale = 3;
+ scale = 2;
+ } else if(scale == 8) {
+ scale = 3;
+ } else {
+ scale--;
}
*(out++) = scale << 6 | (index << 3) | base;
- return out;
+ code->cur = out;
}
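
The scale handling above now encodes log2 of the index multiplier (1, 2, 4, 8 map to SIB scale bits 0, 1, 2, 3); the old code wrongly mapped a scale of 4 to the bits for 8. Equivalently (illustration only):

#include <stdint.h>

static uint8_t sib_scale(uint8_t scale)
{
	return scale == 8 ? 3 : scale == 4 ? 2 : scale == 2 ? 1 : 0;
}
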
-uint8_t * x86_r_size(uint8_t * out, uint8_t opcode, uint8_t opex, uint8_t dst, uint8_t size)
+void x86_r_size(code_info *code, uint8_t opcode, uint8_t opex, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 4);
+ code_ptr out = code->cur;
uint8_t tmp;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
@@ -397,11 +423,13 @@ uint8_t * x86_r_size(uint8_t * out, uint8_t opcode, uint8_t opex, uint8_t dst, u
}
*(out++) = opcode;
*(out++) = MODE_REG_DIRECT | dst | (opex << 3);
- return out;
+ code->cur = out;
}
-uint8_t * x86_rdisp8_size(uint8_t * out, uint8_t opcode, uint8_t opex, uint8_t dst, int8_t disp, uint8_t size)
+void x86_rdisp_size(code_info *code, uint8_t opcode, uint8_t opex, uint8_t dst, int32_t disp, uint8_t size)
{
+ check_alloc_code(code, 7);
+ code_ptr out = code->cur;
uint8_t tmp;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
@@ -421,13 +449,23 @@ uint8_t * x86_rdisp8_size(uint8_t * out, uint8_t opcode, uint8_t opex, uint8_t d
opcode |= BIT_SIZE;
}
*(out++) = opcode;
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst | (opex << 3);
*(out++) = disp;
- return out;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst | (opex << 3);
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
+ code->cur = out;
}
-uint8_t * x86_ir(uint8_t * out, uint8_t opcode, uint8_t op_ex, uint8_t al_opcode, int32_t val, uint8_t dst, uint8_t size)
+void x86_ir(code_info *code, uint8_t opcode, uint8_t op_ex, uint8_t al_opcode, int32_t val, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 8);
+ code_ptr out = code->cur;
uint8_t sign_extend = 0;
if (opcode != OP_NOT_NEG && (size == SZ_D || size == SZ_Q) && val <= 0x7F && val >= -0x80) {
sign_extend = 1;
@@ -476,11 +514,13 @@ uint8_t * x86_ir(uint8_t * out, uint8_t opcode, uint8_t op_ex, uint8_t al_opcode
*(out++) = val;
}
}
- return out;
+ code->cur = out;
}
-uint8_t * x86_irdisp8(uint8_t * out, uint8_t opcode, uint8_t op_ex, int32_t val, uint8_t dst, int8_t disp, uint8_t size)
+void x86_irdisp(code_info *code, uint8_t opcode, uint8_t op_ex, int32_t val, uint8_t dst, int32_t disp, uint8_t size)
{
+ check_alloc_code(code, 12);
+ code_ptr out = code->cur;
uint8_t sign_extend = 0;
if ((size == SZ_D || size == SZ_Q) && val <= 0x7F && val >= -0x80) {
sign_extend = 1;
@@ -505,48 +545,10 @@ uint8_t * x86_irdisp8(uint8_t * out, uint8_t opcode, uint8_t op_ex, int32_t val,
opcode |= BIT_SIZE;
}
*(out++) = opcode;
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst | (op_ex << 3);
*(out++) = disp;
- *(out++) = val;
- if (size != SZ_B && !sign_extend) {
- val >>= 8;
- *(out++) = val;
- if (size != SZ_W) {
- val >>= 8;
- *(out++) = val;
- val >>= 8;
- *(out++) = val;
- }
- }
- return out;
-}
-
-uint8_t * x86_irdisp32(uint8_t * out, uint8_t opcode, uint8_t op_ex, int32_t val, uint8_t dst, int32_t disp, uint8_t size)
-{
- uint8_t sign_extend = 0;
- if ((size == SZ_D || size == SZ_Q) && val <= 0x7F && val >= -0x80) {
- sign_extend = 1;
- opcode |= BIT_DIR;
- }
- if (size == SZ_W) {
- *(out++) = PRE_SIZE;
- }
-
- if (size == SZ_Q || dst >= R8) {
- *out = PRE_REX;
- if (size == SZ_Q) {
- *out |= REX_QUAD;
- }
- if (dst >= R8) {
- *out |= REX_RM_FIELD;
- dst -= (R8 - X86_R8);
- }
- out++;
- }
- if (size != SZ_B) {
- opcode |= BIT_SIZE;
- }
- *(out++) = opcode;
+ } else {
*(out++) = MODE_REG_DISPLACE32 | dst | (op_ex << 3);
*(out++) = disp;
disp >>= 8;
@@ -555,6 +557,7 @@ uint8_t * x86_irdisp32(uint8_t * out, uint8_t opcode, uint8_t op_ex, int32_t val
*(out++) = disp;
disp >>= 8;
*(out++) = disp;
+ }
*(out++) = val;
if (size != SZ_B && !sign_extend) {
val >>= 8;
@@ -566,12 +569,13 @@ uint8_t * x86_irdisp32(uint8_t * out, uint8_t opcode, uint8_t op_ex, int32_t val
*(out++) = val;
}
}
- return out;
+ code->cur = out;
}
-
-uint8_t * x86_shiftrot_ir(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t size)
+void x86_shiftrot_ir(code_info *code, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -595,11 +599,13 @@ uint8_t * x86_shiftrot_ir(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst
if (val != 1) {
*(out++) = val;
}
- return out;
+ code->cur = out;
}
-uint8_t * x86_shiftrot_irdisp8(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, int8_t disp, uint8_t size)
+void x86_shiftrot_irdisp(code_info *code, uint8_t op_ex, uint8_t val, uint8_t dst, int32_t disp, uint8_t size)
{
+ check_alloc_code(code, 9);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -619,16 +625,26 @@ uint8_t * x86_shiftrot_irdisp8(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_
}
*(out++) = (val == 1 ? OP_SHIFTROT_1: OP_SHIFTROT_IR) | (size == SZ_B ? 0 : BIT_SIZE);
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst | (op_ex << 3);
*(out++) = disp;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst | (op_ex << 3);
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
if (val != 1) {
*(out++) = val;
}
- return out;
+ code->cur = out;
}
-uint8_t * x86_shiftrot_clr(uint8_t * out, uint8_t op_ex, uint8_t dst, uint8_t size)
+void x86_shiftrot_clr(code_info *code, uint8_t op_ex, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 4);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -649,11 +665,13 @@ uint8_t * x86_shiftrot_clr(uint8_t * out, uint8_t op_ex, uint8_t dst, uint8_t si
*(out++) = OP_SHIFTROT_CL | (size == SZ_B ? 0 : BIT_SIZE);
*(out++) = MODE_REG_DIRECT | dst | (op_ex << 3);
- return out;
+ code->cur = out;
}
-uint8_t * x86_shiftrot_clrdisp8(uint8_t * out, uint8_t op_ex, uint8_t dst, int8_t disp, uint8_t size)
+void x86_shiftrot_clrdisp(code_info *code, uint8_t op_ex, uint8_t dst, int32_t disp, uint8_t size)
{
+ check_alloc_code(code, 8);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -673,497 +691,492 @@ uint8_t * x86_shiftrot_clrdisp8(uint8_t * out, uint8_t op_ex, uint8_t dst, int8_
}
*(out++) = OP_SHIFTROT_CL | (size == SZ_B ? 0 : BIT_SIZE);
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst | (op_ex << 3);
*(out++) = disp;
- return out;
-}
-
-uint8_t * rol_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
-{
- return x86_shiftrot_ir(out, OP_EX_ROL, val, dst, size);
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst | (op_ex << 3);
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
}
-
-uint8_t * ror_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
-{
- return x86_shiftrot_ir(out, OP_EX_ROR, val, dst, size);
+ code->cur = out;
}
-uint8_t * rcl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void rol_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_ir(out, OP_EX_RCL, val, dst, size);
+ x86_shiftrot_ir(code, OP_EX_ROL, val, dst, size);
}
-uint8_t * rcr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void ror_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_ir(out, OP_EX_RCR, val, dst, size);
+ x86_shiftrot_ir(code, OP_EX_ROR, val, dst, size);
}
-uint8_t * shl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void rcl_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_ir(out, OP_EX_SHL, val, dst, size);
+ x86_shiftrot_ir(code, OP_EX_RCL, val, dst, size);
}
-uint8_t * shr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void rcr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_ir(out, OP_EX_SHR, val, dst, size);
+ x86_shiftrot_ir(code, OP_EX_RCR, val, dst, size);
}
-uint8_t * sar_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void shl_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_ir(out, OP_EX_SAR, val, dst, size);
+ x86_shiftrot_ir(code, OP_EX_SHL, val, dst, size);
}
-uint8_t * rol_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void shr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_ROL, val, dst_base, disp, size);
+ x86_shiftrot_ir(code, OP_EX_SHR, val, dst, size);
}
-uint8_t * ror_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void sar_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_ROR, val, dst_base, disp, size);
+ x86_shiftrot_ir(code, OP_EX_SAR, val, dst, size);
}
-uint8_t * rcl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void rol_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_RCL, val, dst_base, disp, size);
+ x86_shiftrot_irdisp(code, OP_EX_ROL, val, dst_base, disp, size);
}
-uint8_t * rcr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void ror_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_RCR, val, dst_base, disp, size);
+ x86_shiftrot_irdisp(code, OP_EX_ROR, val, dst_base, disp, size);
}
-uint8_t * shl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void rcl_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_SHL, val, dst_base, disp, size);
+ x86_shiftrot_irdisp(code, OP_EX_RCL, val, dst_base, disp, size);
}
-uint8_t * shr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void rcr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_SHR, val, dst_base, disp, size);
+ x86_shiftrot_irdisp(code, OP_EX_RCR, val, dst_base, disp, size);
}
-uint8_t * sar_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void shl_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_irdisp8(out, OP_EX_SAR, val, dst_base, disp, size);
+ x86_shiftrot_irdisp(code, OP_EX_SHL, val, dst_base, disp, size);
}
-uint8_t * rol_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void shr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_ROL, dst, size);
+ x86_shiftrot_irdisp(code, OP_EX_SHR, val, dst_base, disp, size);
}
-uint8_t * ror_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void sar_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_ROR, dst, size);
+ x86_shiftrot_irdisp(code, OP_EX_SAR, val, dst_base, disp, size);
}
-uint8_t * rcl_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void rol_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_RCL, dst, size);
+ x86_shiftrot_clr(code, OP_EX_ROL, dst, size);
}
-uint8_t * rcr_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void ror_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_RCR, dst, size);
+ x86_shiftrot_clr(code, OP_EX_ROR, dst, size);
}
-uint8_t * shl_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void rcl_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_SHL, dst, size);
+ x86_shiftrot_clr(code, OP_EX_RCL, dst, size);
}
-uint8_t * shr_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void rcr_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_SHR, dst, size);
+ x86_shiftrot_clr(code, OP_EX_RCR, dst, size);
}
-uint8_t * sar_clr(uint8_t * out, uint8_t dst, uint8_t size)
+void shl_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clr(out, OP_EX_SAR, dst, size);
+ x86_shiftrot_clr(code, OP_EX_SHL, dst, size);
}
-uint8_t * rol_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void shr_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_ROL, dst_base, disp, size);
+ x86_shiftrot_clr(code, OP_EX_SHR, dst, size);
}
-uint8_t * ror_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void sar_clr(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_ROR, dst_base, disp, size);
+ x86_shiftrot_clr(code, OP_EX_SAR, dst, size);
}
-uint8_t * rcl_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void rol_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_RCL, dst_base, disp, size);
+ x86_shiftrot_clrdisp(code, OP_EX_ROL, dst_base, disp, size);
}
-uint8_t * rcr_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void ror_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_RCR, dst_base, disp, size);
+ x86_shiftrot_clrdisp(code, OP_EX_ROR, dst_base, disp, size);
}
-uint8_t * shl_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void rcl_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_SHL, dst_base, disp, size);
+ x86_shiftrot_clrdisp(code, OP_EX_RCL, dst_base, disp, size);
}
-uint8_t * shr_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void rcr_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_SHR, dst_base, disp, size);
+ x86_shiftrot_clrdisp(code, OP_EX_RCR, dst_base, disp, size);
}
-uint8_t * sar_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void shl_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_shiftrot_clrdisp8(out, OP_EX_SAR, dst_base, disp, size);
+ x86_shiftrot_clrdisp(code, OP_EX_SHL, dst_base, disp, size);
}
-uint8_t * add_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void shr_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_ADD, src, dst, size);
+ x86_shiftrot_clrdisp(code, OP_EX_SHR, dst_base, disp, size);
}
-uint8_t * add_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void sar_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_ADDI, OP_ADD, val, dst, size);
+ x86_shiftrot_clrdisp(code, OP_EX_SAR, dst_base, disp, size);
}
-uint8_t * add_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void add_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_ADDI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_ADD, src, dst, size);
}
-uint8_t * add_irdisp32(uint8_t * out, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
+void add_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_irdisp32(out, OP_IMMED_ARITH, OP_EX_ADDI, val, dst_base, disp, size);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_ADDI, OP_ADD, val, dst, size);
}
-uint8_t * add_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void add_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_ADD, src, dst_base, disp, size, 0);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_ADDI, val, dst_base, disp, size);
}
-uint8_t * add_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void add_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_ADD, dst, src_base, disp, size, BIT_DIR);
+ x86_rrdisp_sizedir(code, OP_ADD, src, dst_base, disp, size, 0);
}
-uint8_t * adc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void add_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_rr_sizedir(out, OP_ADC, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_ADD, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * adc_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void adc_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_ADCI, OP_ADC, val, dst, size);
+ x86_rr_sizedir(code, OP_ADC, src, dst, size);
}
-uint8_t * adc_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void adc_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_ADCI, val, dst_base, disp, size);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_ADCI, OP_ADC, val, dst, size);
}
-uint8_t * adc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void adc_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_ADC, src, dst_base, disp, size, 0);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_ADCI, val, dst_base, disp, size);
}
-uint8_t * adc_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void adc_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_ADC, dst, src_base, disp, size, BIT_DIR);
+ x86_rrdisp_sizedir(code, OP_ADC, src, dst_base, disp, size, 0);
}
-uint8_t * or_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void adc_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_rr_sizedir(out, OP_OR, src, dst, size);
-}
-uint8_t * or_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
-{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_ORI, OP_OR, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_ADC, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * or_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void or_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_ORI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_OR, src, dst, size);
}
-
-uint8_t * or_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void or_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_OR, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_ORI, OP_OR, val, dst, size);
}
-uint8_t * or_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void or_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_OR, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_ORI, val, dst_base, disp, size);
}
-uint8_t * and_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void or_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_AND, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_OR, src, dst_base, disp, size, 0);
}
-uint8_t * and_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void or_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_ANDI, OP_AND, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_OR, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * and_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void and_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_ANDI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_AND, src, dst, size);
}
-uint8_t * and_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void and_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_AND, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_ANDI, OP_AND, val, dst, size);
}
-uint8_t * and_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void and_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_AND, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_ANDI, val, dst_base, disp, size);
}
-uint8_t * xor_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void and_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_XOR, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_AND, src, dst_base, disp, size, 0);
}
-uint8_t * xor_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void and_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_XORI, OP_XOR, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_AND, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * xor_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void xor_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_XORI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_XOR, src, dst, size);
}
-uint8_t * xor_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void xor_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_XOR, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_XORI, OP_XOR, val, dst, size);
}
-uint8_t * xor_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void xor_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_XOR, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_XORI, val, dst_base, disp, size);
}
-uint8_t * sub_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void xor_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_SUB, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_XOR, src, dst_base, disp, size, 0);
}
-uint8_t * sub_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void xor_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_SUBI, OP_SUB, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_XOR, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * sub_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void sub_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_SUBI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_SUB, src, dst, size);
}
-uint8_t * sub_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void sub_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_SUB, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_SUBI, OP_SUB, val, dst, size);
}
-uint8_t * sub_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void sub_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_SUB, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_SUBI, val, dst_base, disp, size);
}
-uint8_t * sbb_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void sub_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_SBB, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_SUB, src, dst_base, disp, size, 0);
}
-uint8_t * sbb_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void sub_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_SBBI, OP_SBB, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_SUB, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * sbb_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void sbb_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_SBBI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_SBB, src, dst, size);
}
-uint8_t * sbb_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void sbb_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_SBB, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_SBBI, OP_SBB, val, dst, size);
}
-uint8_t * sbb_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void sbb_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_SBB, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_SBBI, val, dst_base, disp, size);
}
-uint8_t * cmp_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void sbb_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_CMP, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_SBB, src, dst_base, disp, size, 0);
}
-uint8_t * cmp_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void sbb_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_IMMED_ARITH, OP_EX_CMPI, OP_CMP, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_SBB, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * cmp_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void cmp_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_IMMED_ARITH, OP_EX_CMPI, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_CMP, src, dst, size);
}
-uint8_t * cmp_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void cmp_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_CMP, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_IMMED_ARITH, OP_EX_CMPI, OP_CMP, val, dst, size);
}
-uint8_t * cmp_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void cmp_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_CMP, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_IMMED_ARITH, OP_EX_CMPI, val, dst_base, disp, size);
}
-uint8_t * test_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void cmp_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_TEST, src, dst, size);
+ x86_rrdisp_sizedir(code, OP_CMP, src, dst_base, disp, size, 0);
}
-uint8_t * test_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void cmp_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_ir(out, OP_NOT_NEG, OP_EX_TEST_I, OP_TEST, val, dst, size);
+ x86_rrdisp_sizedir(code, OP_CMP, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * test_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size)
+void test_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_irdisp8(out, OP_NOT_NEG, OP_EX_TEST_I, val, dst_base, disp, size);
+ x86_rr_sizedir(code, OP_TEST, src, dst, size);
}
-uint8_t * test_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void test_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_TEST, src, dst_base, disp, size, 0);
+ x86_ir(code, OP_NOT_NEG, OP_EX_TEST_I, OP_TEST, val, dst, size);
}
-uint8_t * test_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void test_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_TEST, dst, src_base, disp, size, BIT_DIR);
+ x86_irdisp(code, OP_NOT_NEG, OP_EX_TEST_I, val, dst_base, disp, size);
}
-uint8_t * imul_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void test_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP2_IMUL | (PRE_2BYTE << 8), dst, src, size);
+ x86_rrdisp_sizedir(code, OP_TEST, src, dst_base, disp, size, 0);
}
-uint8_t * imul_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void test_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP2_IMUL | (PRE_2BYTE << 8), dst, src_base, disp, size, 0);
+ x86_rrdisp_sizedir(code, OP_TEST, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * not_r(uint8_t * out, uint8_t dst, uint8_t size)
+void imul_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_NOT, dst, size);
+ x86_rr_sizedir(code, OP2_IMUL | (PRE_2BYTE << 8), dst, src, size);
}
-uint8_t * neg_r(uint8_t * out, uint8_t dst, uint8_t size)
+void imul_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_NEG, dst, size);
+ x86_rrdisp_sizedir(code, OP2_IMUL | (PRE_2BYTE << 8), dst, src_base, disp, size, 0);
}
-uint8_t * not_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void not_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_NOT, dst_base, disp, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_NOT, dst, size);
}
-uint8_t * neg_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void neg_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_NEG, dst_base, disp, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_NEG, dst, size);
}
-uint8_t * mul_r(uint8_t * out, uint8_t dst, uint8_t size)
+void not_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_MUL, dst, size);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_NOT, dst_base, disp, size);
}
-uint8_t * imul_r(uint8_t * out, uint8_t dst, uint8_t size)
+void neg_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_IMUL, dst, size);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_NEG, dst_base, disp, size);
}
-uint8_t * div_r(uint8_t * out, uint8_t dst, uint8_t size)
+void mul_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_DIV, dst, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_MUL, dst, size);
}
-uint8_t * idiv_r(uint8_t * out, uint8_t dst, uint8_t size)
+void imul_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_r_size(out, OP_NOT_NEG, OP_EX_IDIV, dst, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_IMUL, dst, size);
}
-uint8_t * mul_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void div_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_MUL, dst_base, disp, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_DIV, dst, size);
}
-uint8_t * imul_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void idiv_r(code_info *code, uint8_t dst, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_IMUL, dst_base, disp, size);
+ x86_r_size(code, OP_NOT_NEG, OP_EX_IDIV, dst, size);
}
-uint8_t * div_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void mul_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_DIV, dst_base, disp, size);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_MUL, dst_base, disp, size);
}
-uint8_t * idiv_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size)
+void imul_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rdisp8_size(out, OP_NOT_NEG, OP_EX_IDIV, dst_base, disp, size);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_IMUL, dst_base, disp, size);
}
-uint8_t * mov_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void div_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rr_sizedir(out, OP_MOV, src, dst, size);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_DIV, dst_base, disp, size);
}
-uint8_t * mov_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size)
+void idiv_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_MOV, src, dst_base, disp, size, 0);
+ x86_rdisp_size(code, OP_NOT_NEG, OP_EX_IDIV, dst_base, disp, size);
}
-uint8_t * mov_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size)
+void mov_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_rrdisp8_sizedir(out, OP_MOV, dst, src_base, disp, size, BIT_DIR);
+ x86_rr_sizedir(code, OP_MOV, src, dst, size);
}
-uint8_t * mov_rrdisp32(uint8_t * out, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
+void mov_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size)
{
- return x86_rrdisp32_sizedir(out, OP_MOV, src, dst_base, disp, size, 0);
+ x86_rrdisp_sizedir(code, OP_MOV, src, dst_base, disp, size, 0);
}
-uint8_t * mov_rdisp32r(uint8_t * out, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
+void mov_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size)
{
- return x86_rrdisp32_sizedir(out, OP_MOV, dst, src_base, disp, size, BIT_DIR);
+ x86_rrdisp_sizedir(code, OP_MOV, dst, src_base, disp, size, BIT_DIR);
}
-uint8_t * mov_rrind(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void mov_rrind(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_rrind_sizedir(out, OP_MOV, src, dst, size, 0);
+ x86_rrind_sizedir(code, OP_MOV, src, dst, size, 0);
}
-uint8_t * mov_rindr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void mov_rindr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return x86_rrind_sizedir(out, OP_MOV, dst, src, size, BIT_DIR);
+ x86_rrind_sizedir(code, OP_MOV, dst, src, size, BIT_DIR);
}
-uint8_t * mov_rrindex(uint8_t * out, uint8_t src, uint8_t dst_base, uint8_t dst_index, uint8_t scale, uint8_t size)
+void mov_rrindex(code_info *code, uint8_t src, uint8_t dst_base, uint8_t dst_index, uint8_t scale, uint8_t size)
{
- return x86_rrindex_sizedir(out, OP_MOV, src, dst_base, dst_index, scale, size, 0);
+ x86_rrindex_sizedir(code, OP_MOV, src, dst_base, dst_index, scale, size, 0);
}
-uint8_t * mov_rindexr(uint8_t * out, uint8_t src_base, uint8_t src_index, uint8_t scale, uint8_t dst, uint8_t size)
+void mov_rindexr(code_info *code, uint8_t src_base, uint8_t src_index, uint8_t scale, uint8_t dst, uint8_t size)
{
- return x86_rrindex_sizedir(out, OP_MOV, dst, src_base, src_index, scale, size, BIT_DIR);
+ x86_rrindex_sizedir(code, OP_MOV, dst, src_base, src_index, scale, size, BIT_DIR);
}
-uint8_t * mov_ir(uint8_t * out, int64_t val, uint8_t dst, uint8_t size)
+void mov_ir(code_info *code, int64_t val, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 14);
+ code_ptr out = code->cur;
uint8_t sign_extend = 0;
if (size == SZ_Q && val <= 0x7FFFFFFF && val >= -2147483648) {
sign_extend = 1;
@@ -1214,11 +1227,13 @@ uint8_t * mov_ir(uint8_t * out, int64_t val, uint8_t dst, uint8_t size)
}
}
}
- return out;
+ code->cur = out;
}
-uint8_t * mov_irdisp8(uint8_t * out, int32_t val, uint8_t dst, int8_t disp, uint8_t size)
+void mov_irdisp(code_info *code, int32_t val, uint8_t dst, int32_t disp, uint8_t size)
{
+ check_alloc_code(code, 12);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1237,8 +1252,16 @@ uint8_t * mov_irdisp8(uint8_t * out, int32_t val, uint8_t dst, int8_t disp, uint
dst -= (AH-X86_AH);
}
*(out++) = OP_MOV_IEA | (size == SZ_B ? 0 : BIT_SIZE);
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst;
*(out++) = disp;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst;
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
*(out++) = val;
if (size != SZ_B) {
@@ -1251,11 +1274,13 @@ uint8_t * mov_irdisp8(uint8_t * out, int32_t val, uint8_t dst, int8_t disp, uint
*(out++) = val;
}
}
- return out;
+ code->cur = out;
}
-uint8_t * mov_irind(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
+void mov_irind(code_info *code, int32_t val, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 8);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1287,11 +1312,13 @@ uint8_t * mov_irind(uint8_t * out, int32_t val, uint8_t dst, uint8_t size)
*(out++) = val;
}
}
- return out;
+ code->cur = out;
}
-uint8_t * movsx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size)
+void movsx_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1317,11 +1344,13 @@ uint8_t * movsx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, ui
*(out++) = OP2_MOVSX | (src_size == SZ_B ? 0 : BIT_SIZE);
}
*(out++) = MODE_REG_DIRECT | src | (dst << 3);
- return out;
+ code->cur = out;
}
-uint8_t * movsx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size)
+void movsx_rdispr(code_info *code, uint8_t src, int32_t disp, uint8_t dst, uint8_t src_size, uint8_t size)
{
+ check_alloc_code(code, 12);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1346,13 +1375,23 @@ uint8_t * movsx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, ui
*(out++) = PRE_2BYTE;
*(out++) = OP2_MOVSX | (src_size == SZ_B ? 0 : BIT_SIZE);
}
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | src | (dst << 3);
*(out++) = disp;
- return out;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | src | (dst << 3);
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
+ code->cur = out;
}
-uint8_t * movzx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size)
+void movzx_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1374,11 +1413,13 @@ uint8_t * movzx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, ui
*(out++) = PRE_2BYTE;
*(out++) = OP2_MOVZX | (src_size == SZ_B ? 0 : BIT_SIZE);
*(out++) = MODE_REG_DIRECT | src | (dst << 3);
- return out;
+ code->cur = out;
}
-uint8_t * movzx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size)
+void movzx_rdispr(code_info *code, uint8_t src, int32_t disp, uint8_t dst, uint8_t src_size, uint8_t size)
{
+ check_alloc_code(code, 9);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1399,13 +1440,23 @@ uint8_t * movzx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, ui
}
*(out++) = PRE_2BYTE;
*(out++) = OP2_MOVZX | (src_size == SZ_B ? 0 : BIT_SIZE);
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | src | (dst << 3);
*(out++) = disp;
- return out;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | src | (dst << 3);
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
+ code->cur = out;
}
-uint8_t * xchg_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void xchg_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 4);
+ code_ptr out = code->cur;
//TODO: Use OP_XCHG_AX when one of the registers is AX, EAX or RAX
uint8_t tmp;
if (size == SZ_W) {
@@ -1444,43 +1495,73 @@ uint8_t * xchg_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
}
*(out++) = opcode;
*(out++) = MODE_REG_DIRECT | dst | (src << 3);
- return out;
+ code->cur = out;
}
-uint8_t * pushf(uint8_t * out)
+void pushf(code_info *code)
{
+ check_alloc_code(code, 1);
+ code_ptr out = code->cur;
*(out++) = OP_PUSHF;
- return out;
+ code->cur = out;
}
-uint8_t * popf(uint8_t * out)
+void popf(code_info *code)
{
+ check_alloc_code(code, 1);
+ code_ptr out = code->cur;
*(out++) = OP_POPF;
- return out;
+ code->cur = out;
}
-uint8_t * push_r(uint8_t * out, uint8_t reg)
+void push_r(code_info *code, uint8_t reg)
{
+ check_alloc_code(code, 2);
+ code_ptr out = code->cur;
if (reg >= R8) {
*(out++) = PRE_REX | REX_RM_FIELD;
reg -= R8 - X86_R8;
}
*(out++) = OP_PUSH | reg;
- return out;
+ code->cur = out;
+}
+
+void push_rdisp(code_info *code, uint8_t base, int32_t disp)
+{
+ //This instruction has no explicit size, so we pass SZ_B
+ //to avoid any prefixes or bits being set
+ x86_rdisp_size(code, OP_SINGLE_EA, OP_EX_PUSH_EA, base, disp, SZ_B);
}
-uint8_t * pop_r(uint8_t * out, uint8_t reg)
+void pop_r(code_info *code, uint8_t reg)
{
+ check_alloc_code(code, 2);
+ code_ptr out = code->cur;
if (reg >= R8) {
*(out++) = PRE_REX | REX_RM_FIELD;
reg -= R8 - X86_R8;
}
*(out++) = OP_POP | reg;
- return out;
+ code->cur = out;
}
-uint8_t * setcc_r(uint8_t * out, uint8_t cc, uint8_t dst)
+void pop_rind(code_info *code, uint8_t reg)
{
+ check_alloc_code(code, 3);
+ code_ptr out = code->cur;
+ if (reg >= R8) {
+ *(out++) = PRE_REX | REX_RM_FIELD;
+ reg -= R8 - X86_R8;
+ }
+ *(out++) = PRE_XOP;
+ *(out++) = MODE_REG_INDIRECT | reg;
+ code->cur = out;
+}
+
+void setcc_r(code_info *code, uint8_t cc, uint8_t dst)
+{
+ check_alloc_code(code, 4);
+ code_ptr out = code->cur;
if (dst >= R8) {
*(out++) = PRE_REX | REX_RM_FIELD;
dst -= R8 - X86_R8;
@@ -1492,11 +1573,13 @@ uint8_t * setcc_r(uint8_t * out, uint8_t cc, uint8_t dst)
*(out++) = PRE_2BYTE;
*(out++) = OP2_SETCC | cc;
*(out++) = MODE_REG_DIRECT | dst;
- return out;
+ code->cur = out;
}
-uint8_t * setcc_rind(uint8_t * out, uint8_t cc, uint8_t dst)
+void setcc_rind(code_info *code, uint8_t cc, uint8_t dst)
{
+ check_alloc_code(code, 4);
+ code_ptr out = code->cur;
if (dst >= R8) {
*(out++) = PRE_REX | REX_RM_FIELD;
dst -= R8 - X86_R8;
@@ -1504,24 +1587,36 @@ uint8_t * setcc_rind(uint8_t * out, uint8_t cc, uint8_t dst)
*(out++) = PRE_2BYTE;
*(out++) = OP2_SETCC | cc;
*(out++) = MODE_REG_INDIRECT | dst;
- return out;
+ code->cur = out;
}
-uint8_t * setcc_rdisp8(uint8_t * out, uint8_t cc, uint8_t dst, int8_t disp)
+void setcc_rdisp(code_info *code, uint8_t cc, uint8_t dst, int32_t disp)
{
+ check_alloc_code(code, 8);
+ code_ptr out = code->cur;
if (dst >= R8) {
*(out++) = PRE_REX | REX_RM_FIELD;
dst -= R8 - X86_R8;
}
*(out++) = PRE_2BYTE;
*(out++) = OP2_SETCC | cc;
+ if (disp < 128 && disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst;
*(out++) = disp;
- return out;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst;
+ *(out++) = disp;
+ *(out++) = disp >> 8;
+ *(out++) = disp >> 16;
+ *(out++) = disp >> 24;
+ }
+ code->cur = out;
}
-uint8_t * bit_rr(uint8_t * out, uint8_t op2, uint8_t src, uint8_t dst, uint8_t size)
+void bit_rr(code_info *code, uint8_t op2, uint8_t src, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1543,11 +1638,13 @@ uint8_t * bit_rr(uint8_t * out, uint8_t op2, uint8_t src, uint8_t dst, uint8_t s
*(out++) = PRE_2BYTE;
*(out++) = op2;
*(out++) = MODE_REG_DIRECT | dst | (src << 3);
- return out;
+ code->cur = out;
}
-uint8_t * bit_rrdisp8(uint8_t * out, uint8_t op2, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bit_rrdisp(code_info *code, uint8_t op2, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
+ check_alloc_code(code, 9);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1568,43 +1665,23 @@ uint8_t * bit_rrdisp8(uint8_t * out, uint8_t op2, uint8_t src, uint8_t dst_base,
}
*(out++) = PRE_2BYTE;
*(out++) = op2;
+ if (dst_disp < 128 && dst_disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst_base | (src << 3);
*(out++) = dst_disp;
- return out;
-}
-
-uint8_t * bit_rrdisp32(uint8_t * out, uint8_t op2, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
-{
- if (size == SZ_W) {
- *(out++) = PRE_SIZE;
- }
- if (size == SZ_Q || src >= R8 || dst_base >= R8) {
- *out = PRE_REX;
- if (size == SZ_Q) {
- *out |= REX_QUAD;
- }
- if (src >= R8) {
- *out |= REX_REG_FIELD;
- src -= (R8 - X86_R8);
- }
- if (dst_base >= R8) {
- *out |= REX_RM_FIELD;
- dst_base -= (R8 - X86_R8);
- }
- out++;
- }
- *(out++) = PRE_2BYTE;
- *(out++) = op2;
+ } else {
*(out++) = MODE_REG_DISPLACE32 | dst_base | (src << 3);
*(out++) = dst_disp;
*(out++) = dst_disp >> 8;
*(out++) = dst_disp >> 16;
*(out++) = dst_disp >> 24;
- return out;
+ }
+ code->cur = out;
}
-uint8_t * bit_ir(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t size)
+void bit_ir(code_info *code, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t size)
{
+ check_alloc_code(code, 6);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1623,11 +1700,13 @@ uint8_t * bit_ir(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst, uint8_t
*(out++) = OP2_BTX_I;
*(out++) = MODE_REG_DIRECT | dst | (op_ex << 3);
*(out++) = val;
- return out;
+ code->cur = out;
}
-uint8_t * bit_irdisp8(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bit_irdisp(code_info *code, uint8_t op_ex, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
+ check_alloc_code(code, 10);
+ code_ptr out = code->cur;
if (size == SZ_W) {
*(out++) = PRE_SIZE;
}
@@ -1644,99 +1723,104 @@ uint8_t * bit_irdisp8(uint8_t * out, uint8_t op_ex, uint8_t val, uint8_t dst_bas
}
*(out++) = PRE_2BYTE;
*(out++) = OP2_BTX_I;
+ if (dst_disp < 128 && dst_disp >= -128) {
*(out++) = MODE_REG_DISPLACE8 | dst_base | (op_ex << 3);
*(out++) = dst_disp;
+ } else {
+ *(out++) = MODE_REG_DISPLACE32 | dst_base | (op_ex << 3);
+ *(out++) = dst_disp;
+ *(out++) = dst_disp >> 8;
+ *(out++) = dst_disp >> 16;
+ *(out++) = dst_disp >> 24;
+ }
*(out++) = val;
- return out;
-}
-
-uint8_t * bt_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
-{
- return bit_rr(out, OP2_BT, src, dst, size);
+ code->cur = out;
}
-uint8_t * bt_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bt_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return bit_rrdisp8(out, OP2_BT, src, dst_base, dst_disp, size);
+ return bit_rr(code, OP2_BT, src, dst, size);
}
-uint8_t * bt_rrdisp32(uint8_t * out, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
+void bt_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_rrdisp32(out, OP2_BT, src, dst_base, dst_disp, size);
+ return bit_rrdisp(code, OP2_BT, src, dst_base, dst_disp, size);
}
-uint8_t * bt_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void bt_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return bit_ir(out, OP_EX_BT, val, dst, size);
+ return bit_ir(code, OP_EX_BT, val, dst, size);
}
-uint8_t * bt_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bt_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_irdisp8(out, OP_EX_BT, val, dst_base, dst_disp, size);
+ return bit_irdisp(code, OP_EX_BT, val, dst_base, dst_disp, size);
}
-uint8_t * bts_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void bts_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return bit_rr(out, OP2_BTS, src, dst, size);
+ return bit_rr(code, OP2_BTS, src, dst, size);
}
-uint8_t * bts_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bts_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_rrdisp8(out, OP2_BTS, src, dst_base, dst_disp, size);
+ return bit_rrdisp(code, OP2_BTS, src, dst_base, dst_disp, size);
}
-uint8_t * bts_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void bts_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return bit_ir(out, OP_EX_BTS, val, dst, size);
+ return bit_ir(code, OP_EX_BTS, val, dst, size);
}
-uint8_t * bts_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void bts_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_irdisp8(out, OP_EX_BTS, val, dst_base, dst_disp, size);
+ return bit_irdisp(code, OP_EX_BTS, val, dst_base, dst_disp, size);
}
-uint8_t * btr_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void btr_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return bit_rr(out, OP2_BTR, src, dst, size);
+ return bit_rr(code, OP2_BTR, src, dst, size);
}
-uint8_t * btr_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void btr_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_rrdisp8(out, OP2_BTR, src, dst_base, dst_disp, size);
+ return bit_rrdisp(code, OP2_BTR, src, dst_base, dst_disp, size);
}
-uint8_t * btr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void btr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return bit_ir(out, OP_EX_BTR, val, dst, size);
+ return bit_ir(code, OP_EX_BTR, val, dst, size);
}
-uint8_t * btr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void btr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_irdisp8(out, OP_EX_BTR, val, dst_base, dst_disp, size);
+ return bit_irdisp(code, OP_EX_BTR, val, dst_base, dst_disp, size);
}
-uint8_t * btc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size)
+void btc_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size)
{
- return bit_rr(out, OP2_BTC, src, dst, size);
+ return bit_rr(code, OP2_BTC, src, dst, size);
}
-uint8_t * btc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void btc_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_rrdisp8(out, OP2_BTC, src, dst_base, dst_disp, size);
+ return bit_rrdisp(code, OP2_BTC, src, dst_base, dst_disp, size);
}
-uint8_t * btc_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size)
+void btc_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size)
{
- return bit_ir(out, OP_EX_BTC, val, dst, size);
+ return bit_ir(code, OP_EX_BTC, val, dst, size);
}
-uint8_t * btc_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size)
+void btc_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size)
{
- return bit_irdisp8(out, OP_EX_BTC, val, dst_base, dst_disp, size);
+ return bit_irdisp(code, OP_EX_BTC, val, dst_base, dst_disp, size);
}
-uint8_t * jcc(uint8_t * out, uint8_t cc, uint8_t * dest)
+void jcc(code_info *code, uint8_t cc, code_ptr dest)
{
+ check_alloc_code(code, 6);
+ code_ptr out = code->cur;
ptrdiff_t disp = dest-(out+2);
if (disp <= 0x7F && disp >= -0x80) {
*(out++) = OP_JCC | cc;
@@ -1754,15 +1838,17 @@ uint8_t * jcc(uint8_t * out, uint8_t cc, uint8_t * dest)
disp >>= 8;
*(out++) = disp;
} else {
- printf("%p - %p = %lX\n", dest, out + 6, (long)disp);
- return NULL;
+ fprintf(stderr, "jcc: %p - %p = %lX\n", dest, out + 6, (long)disp);
+ exit(1);
}
}
- return out;
+ code->cur = out;
}
-uint8_t * jmp(uint8_t * out, uint8_t * dest)
+void jmp(code_info *code, code_ptr dest)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
ptrdiff_t disp = dest-(out+2);
if (disp <= 0x7F && disp >= -0x80) {
*(out++) = OP_JMP_BYTE;
@@ -1779,26 +1865,43 @@ uint8_t * jmp(uint8_t * out, uint8_t * dest)
disp >>= 8;
*(out++) = disp;
} else {
- printf("%p - %p = %lX\n", dest, out + 6, (long)disp);
- return NULL;
+ fprintf(stderr, "jmp: %p - %p = %lX\n", dest, out + 6, (long)disp);
+ exit(1);
}
}
- return out;
+ code->cur = out;
}
-uint8_t * jmp_r(uint8_t * out, uint8_t dst)
+void jmp_r(code_info *code, uint8_t dst)
{
+ check_alloc_code(code, 3);
+ code_ptr out = code->cur;
if (dst >= R8) {
dst -= R8 - X86_R8;
*(out++) = PRE_REX | REX_RM_FIELD;
}
*(out++) = OP_SINGLE_EA;
*(out++) = MODE_REG_DIRECT | dst | (OP_EX_JMP_EA << 3);
- return out;
+ code->cur = out;
+}
+
+void jmp_rind(code_info *code, uint8_t dst)
+{
+ check_alloc_code(code, 3);
+ code_ptr out = code->cur;
+ if (dst >= R8) {
+ dst -= R8 - X86_R8;
+ *(out++) = PRE_REX | REX_RM_FIELD;
+ }
+ *(out++) = OP_SINGLE_EA;
+ *(out++) = MODE_REG_INDIRECT | dst | (OP_EX_JMP_EA << 3);
+ code->cur = out;
}
-uint8_t * call(uint8_t * out, uint8_t * fun)
+void call(code_info *code, code_ptr fun)
{
+ check_alloc_code(code, 5);
+ code_ptr out = code->cur;
ptrdiff_t disp = fun-(out+5);
if (disp <= 0x7FFFFFFF && disp >= -2147483648) {
*(out++) = OP_CALL;
@@ -1811,35 +1914,158 @@ uint8_t * call(uint8_t * out, uint8_t * fun)
*(out++) = disp;
} else {
//TODO: Implement far call???
- printf("%p - %p = %lX\n", fun, out + 5, (long)disp);
- return NULL;
+ fprintf(stderr, "%p - %p = %lX\n", fun, out + 5, (long)disp);
+ exit(1);
}
- return out;
+ code->cur = out;
}
-uint8_t * call_r(uint8_t * out, uint8_t dst)
+void call_r(code_info *code, uint8_t dst)
{
+ check_alloc_code(code, 2);
+ code_ptr out = code->cur;
*(out++) = OP_SINGLE_EA;
*(out++) = MODE_REG_DIRECT | dst | (OP_EX_CALL_EA << 3);
- return out;
+ code->cur = out;
}
-uint8_t * retn(uint8_t * out)
+void retn(code_info *code)
{
+ check_alloc_code(code, 1);
+ code_ptr out = code->cur;
*(out++) = OP_RETN;
- return out;
+ code->cur = out;
}
-uint8_t * cdq(uint8_t * out)
+void cdq(code_info *code)
{
+ check_alloc_code(code, 1);
+ code_ptr out = code->cur;
*(out++) = OP_CDQ;
- return out;
+ code->cur = out;
}
-uint8_t * loop(uint8_t * out, uint8_t * dst)
+void loop(code_info *code, code_ptr dst)
{
+ check_alloc_code(code, 2);
+ code_ptr out = code->cur;
ptrdiff_t disp = dst-(out+2);
*(out++) = OP_LOOP;
*(out++) = disp;
- return out;
+ code->cur = out;
+}
+
+uint32_t prep_args(code_info *code, uint32_t num_args, va_list args)
+{
+ uint8_t *arg_arr = malloc(num_args);
+ for (int i = 0; i < num_args; i ++)
+ {
+ arg_arr[i] = va_arg(args, int);
+ }
+#ifdef X86_64
+ uint32_t stack_args = 0;
+ uint8_t abi_regs[] = {RDI, RSI, RDX, RCX, R8, R9};
+ int8_t reg_swap[R15+1];
+ uint32_t usage = 0;
+ memset(reg_swap, -1, sizeof(reg_swap));
+ for (int i = 0; i < num_args; i ++)
+ {
+ usage |= 1 << arg_arr[i];
+ }
+ for (int i = 0; i < num_args; i ++)
+ {
+ uint8_t reg_arg = arg_arr[i];
+ if (i < sizeof(abi_regs)) {
+ if (reg_swap[reg_arg] >= 0) {
+ reg_arg = reg_swap[reg_arg];
+ }
+ if (reg_arg != abi_regs[i]) {
+ if (usage & (1 << abi_regs[i])) {
+ xchg_rr(code, reg_arg, abi_regs[i], SZ_PTR);
+ reg_swap[abi_regs[i]] = reg_arg;
+ } else {
+ mov_rr(code, reg_arg, abi_regs[i], SZ_PTR);
+ }
+ }
+ } else {
+ arg_arr[stack_args++] = reg_arg;
+ }
+ }
+#else
+#define stack_args num_args
+#endif
+ for (int i = stack_args -1; i >= 0; i--)
+ {
+ push_r(code, arg_arr[i]);
+ }
+
+ free(arg_arr);
+ return stack_args * sizeof(void *);
+}
+
+void call_args(code_info *code, code_ptr fun, uint32_t num_args, ...)
+{
+ va_list args;
+ va_start(args, num_args);
+ uint32_t adjust = prep_args(code, num_args, args);
+ va_end(args);
+ call(code, fun);
+ if (adjust) {
+ add_ir(code, adjust, RSP, SZ_PTR);
+ }
+}
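
call_args hides the per-platform calling convention from the rest of the JIT. A minimal usage sketch (the helper function and register choices are illustrative, not taken from this changeset):

    //Illustrative: emit a call to a two-argument C helper from generated code.
    //On x86-64, prep_args copies RAX and RBX into RDI and RSI (the System V
    //argument registers); on 32-bit x86 both values are pushed and the stack
    //pointer is adjusted after the call returns.
    void log_pair(uint32_t a, uint32_t b); //hypothetical helper

    void emit_log_pair(code_info *code)
    {
    	call_args(code, (code_ptr)log_pair, 2, RAX, RBX);
    }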
+
+void call_args_abi(code_info *code, code_ptr fun, uint32_t num_args, ...)
+{
+ va_list args;
+ va_start(args, num_args);
+ uint32_t adjust = prep_args(code, num_args, args);
+ va_end(args);
+#ifdef X86_64
+ test_ir(code, 8, RSP, SZ_PTR); //check stack alignment
+ code_ptr do_adjust_rsp = code->cur + 1;
+ jcc(code, CC_NZ, code->cur + 2);
+#endif
+ call(code, fun);
+ if (adjust) {
+ add_ir(code, adjust, RSP, SZ_PTR);
+ }
+#ifdef X86_64
+ code_ptr no_adjust_rsp = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *do_adjust_rsp = code->cur - (do_adjust_rsp+1);
+ sub_ir(code, 8, RSP, SZ_PTR);
+ call(code, fun);
+ add_ir(code, adjust + 8 , RSP, SZ_PTR);
+ *no_adjust_rsp = code->cur - (no_adjust_rsp+1);
+#endif
+}
+
+void save_callee_save_regs(code_info *code)
+{
+ push_r(code, RBX);
+ push_r(code, RBP);
+#ifdef X86_64
+ push_r(code, R12);
+ push_r(code, R13);
+ push_r(code, R14);
+ push_r(code, R15);
+#else
+ push_r(code, RDI);
+ push_r(code, RSI);
+#endif
+}
+
+void restore_callee_save_regs(code_info *code)
+{
+#ifdef X86_64
+ pop_r(code, R15);
+ pop_r(code, R14);
+ pop_r(code, R13);
+ pop_r(code, R12);
+#else
+ pop_r(code, RSI);
+ pop_r(code, RDI);
+#endif
+ pop_r(code, RBP);
+ pop_r(code, RBX);
}
diff --git a/gen_x86.h b/gen_x86.h
index a74f3cd..0e614dc 100644
--- a/gen_x86.h
+++ b/gen_x86.h
@@ -7,6 +7,7 @@
#define GEN_X86_H_
#include <stdint.h>
+#include "gen.h"
enum {
RAX = 0,
@@ -35,7 +36,9 @@ enum {
CC_O = 0,
CC_NO,
CC_C,
+ CC_B = CC_C,
CC_NC,
+ CC_NB = CC_NC,
CC_Z,
CC_NZ,
CC_BE,
@@ -59,8 +62,10 @@ enum {
#ifdef X86_64
#define SZ_PTR SZ_Q
+#define MAX_INST_LEN 14
#else
#define SZ_PTR SZ_D
+#define MAX_INST_LEN 11
#endif
enum {
@@ -75,145 +80,140 @@ enum {
MODE_IMMED = 0xFF
} x86_modes;
-
-uint8_t * rol_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * ror_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * rcl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * rcr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * shl_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * shr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * sar_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * rol_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * ror_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * rcl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * rcr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * shl_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * shr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sar_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * rol_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * ror_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * rcl_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * rcr_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * shl_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * shr_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * sar_clr(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * rol_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * ror_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * rcl_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * rcr_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * shl_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * shr_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sar_clrdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * add_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * adc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * or_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * xor_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * and_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * sub_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * sbb_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * cmp_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * add_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * adc_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * or_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * xor_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * and_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * sub_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * sbb_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * cmp_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * add_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * add_irdisp32(uint8_t * out, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
-uint8_t * adc_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * or_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * xor_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * and_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sub_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sbb_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * cmp_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * add_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * adc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * add_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * adc_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * or_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * or_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * xor_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * xor_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * and_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * and_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * sub_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sub_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * sbb_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * sbb_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * cmp_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * cmp_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * imul_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * imul_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * imul_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * not_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * neg_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * not_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * neg_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * mul_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * imul_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * div_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * idiv_r(uint8_t * out, uint8_t dst, uint8_t size);
-uint8_t * mul_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * imul_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * div_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * idiv_rdisp8(uint8_t * out, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * test_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * test_ir(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * test_irdisp8(uint8_t * out, int32_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * test_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * test_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * mov_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * mov_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t disp, uint8_t size);
-uint8_t * mov_rdisp8r(uint8_t * out, uint8_t src_base, int8_t disp, uint8_t dst, uint8_t size);
-uint8_t * mov_rrdisp32(uint8_t * out, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
-uint8_t * mov_rdisp32r(uint8_t * out, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
-uint8_t * mov_rrindex(uint8_t * out, uint8_t src, uint8_t dst_base, uint8_t dst_index, uint8_t scale, uint8_t size);
-uint8_t * mov_rindexr(uint8_t * out, uint8_t src_base, uint8_t src_index, uint8_t scale, uint8_t dst, uint8_t size);
-uint8_t * mov_rrind(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * mov_rindr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * mov_ir(uint8_t * out, int64_t val, uint8_t dst, uint8_t size);
-uint8_t * mov_irdisp8(uint8_t * out, int32_t val, uint8_t dst, int8_t disp, uint8_t size);
-uint8_t * mov_irind(uint8_t * out, int32_t val, uint8_t dst, uint8_t size);
-uint8_t * movsx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size);
-uint8_t * movsx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
-uint8_t * movzx_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size);
-uint8_t * movzx_rdisp8r(uint8_t * out, uint8_t src, int8_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
-uint8_t * xchg_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * pushf(uint8_t * out);
-uint8_t * popf(uint8_t * out);
-uint8_t * push_r(uint8_t * out, uint8_t reg);
-uint8_t * pop_r(uint8_t * out, uint8_t reg);
-uint8_t * setcc_r(uint8_t * out, uint8_t cc, uint8_t dst);
-uint8_t * setcc_rind(uint8_t * out, uint8_t cc, uint8_t dst);
-uint8_t * setcc_rdisp8(uint8_t * out, uint8_t cc, uint8_t dst, int8_t disp);
-uint8_t * bt_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * bt_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * bt_rrdisp32(uint8_t * out, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size);
-uint8_t * bt_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * bt_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * bts_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * bts_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * bts_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * bts_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * btr_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * btr_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * btr_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * btr_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * btc_rr(uint8_t * out, uint8_t src, uint8_t dst, uint8_t size);
-uint8_t * btc_rrdisp8(uint8_t * out, uint8_t src, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * btc_ir(uint8_t * out, uint8_t val, uint8_t dst, uint8_t size);
-uint8_t * btc_irdisp8(uint8_t * out, uint8_t val, uint8_t dst_base, int8_t dst_disp, uint8_t size);
-uint8_t * jcc(uint8_t * out, uint8_t cc, uint8_t *dest);
-uint8_t * jmp(uint8_t * out, uint8_t *dest);
-uint8_t * jmp_r(uint8_t * out, uint8_t dst);
-uint8_t * call(uint8_t * out, uint8_t * fun);
-uint8_t * call_r(uint8_t * out, uint8_t dst);
-uint8_t * retn(uint8_t * out);
-uint8_t * cdq(uint8_t * out);
-uint8_t * loop(uint8_t * out, uint8_t * dst);
+void rol_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void ror_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void rcl_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void rcr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void shl_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void shr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void sar_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void rol_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void ror_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void rcl_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void rcr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void shl_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void shr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void sar_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void rol_clr(code_info *code, uint8_t dst, uint8_t size);
+void ror_clr(code_info *code, uint8_t dst, uint8_t size);
+void rcl_clr(code_info *code, uint8_t dst, uint8_t size);
+void rcr_clr(code_info *code, uint8_t dst, uint8_t size);
+void shl_clr(code_info *code, uint8_t dst, uint8_t size);
+void shr_clr(code_info *code, uint8_t dst, uint8_t size);
+void sar_clr(code_info *code, uint8_t dst, uint8_t size);
+void rol_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void ror_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void rcl_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void rcr_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void shl_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void shr_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void sar_clrdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void add_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void adc_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void or_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void xor_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void and_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void sub_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void sbb_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void cmp_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void add_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void adc_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void or_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void xor_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void and_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void sub_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void sbb_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void cmp_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void add_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void adc_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void or_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void xor_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void and_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void sub_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void sbb_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void cmp_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void add_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void adc_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void add_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void adc_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void or_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void or_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void xor_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void xor_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void and_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void and_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void sub_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void sub_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void sbb_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void sbb_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void cmp_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void cmp_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void imul_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void imul_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void imul_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void not_r(code_info *code, uint8_t dst, uint8_t size);
+void neg_r(code_info *code, uint8_t dst, uint8_t size);
+void not_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void neg_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void mul_r(code_info *code, uint8_t dst, uint8_t size);
+void imul_r(code_info *code, uint8_t dst, uint8_t size);
+void div_r(code_info *code, uint8_t dst, uint8_t size);
+void idiv_r(code_info *code, uint8_t dst, uint8_t size);
+void mul_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void imul_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void div_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void idiv_rdisp(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+void test_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void test_ir(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void test_irdisp(code_info *code, int32_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+void test_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void test_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void mov_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void mov_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t disp, uint8_t size);
+void mov_rdispr(code_info *code, uint8_t src_base, int32_t disp, uint8_t dst, uint8_t size);
+void mov_rrindex(code_info *code, uint8_t src, uint8_t dst_base, uint8_t dst_index, uint8_t scale, uint8_t size);
+void mov_rindexr(code_info *code, uint8_t src_base, uint8_t src_index, uint8_t scale, uint8_t dst, uint8_t size);
+void mov_rrind(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void mov_rindr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void mov_ir(code_info *code, int64_t val, uint8_t dst, uint8_t size);
+void mov_irdisp(code_info *code, int32_t val, uint8_t dst, int32_t disp, uint8_t size);
+void mov_irind(code_info *code, int32_t val, uint8_t dst, uint8_t size);
+void movsx_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size);
+void movsx_rdispr(code_info *code, uint8_t src, int32_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
+void movzx_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t src_size, uint8_t size);
+void movzx_rdispr(code_info *code, uint8_t src, int32_t disp, uint8_t dst, uint8_t src_size, uint8_t size);
+void xchg_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void pushf(code_info *code);
+void popf(code_info *code);
+void push_r(code_info *code, uint8_t reg);
+void push_rdisp(code_info *code, uint8_t base, int32_t disp);
+void pop_r(code_info *code, uint8_t reg);
+void pop_rind(code_info *code, uint8_t reg);
+void setcc_r(code_info *code, uint8_t cc, uint8_t dst);
+void setcc_rind(code_info *code, uint8_t cc, uint8_t dst);
+void setcc_rdisp(code_info *code, uint8_t cc, uint8_t dst, int32_t disp);
+void bt_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void bt_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void bt_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void bt_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void bts_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void bts_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void bts_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void bts_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void btr_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void btr_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void btr_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void btr_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void btc_rr(code_info *code, uint8_t src, uint8_t dst, uint8_t size);
+void btc_rrdisp(code_info *code, uint8_t src, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void btc_ir(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+void btc_irdisp(code_info *code, uint8_t val, uint8_t dst_base, int32_t dst_disp, uint8_t size);
+void jcc(code_info *code, uint8_t cc, code_ptr dest);
+void jmp_rind(code_info *code, uint8_t dst);
+void call_r(code_info *code, uint8_t dst);
+void retn(code_info *code);
+void cdq(code_info *code);
+void loop(code_info *code, code_ptr dst);
#endif //GEN_X86_H_
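
Together these declarations replace the pointer-returning emitters with a code_info-based API. A minimal sketch of emitting a small function body with it (assumes the caller has already pointed code->cur at writable, executable memory, e.g. via the allocator in gen.c; register usage assumes the x86-64 System V ABI):

    //Illustrative: emit "return 2 * arg" for a 32-bit argument arriving in RDI,
    //using only emitters declared in gen_x86.h plus the prologue/epilogue
    //helpers defined in gen_x86.c.
    void emit_double_arg(code_info *code)
    {
    	save_callee_save_regs(code);    //standard prologue
    	mov_rr(code, RDI, RAX, SZ_D);   //copy the argument into the return register
    	shl_ir(code, 1, RAX, SZ_D);     //multiply by two
    	restore_callee_save_regs(code); //matching epilogue
    	retn(code);
    }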
diff --git a/gentests.py b/gentests.py
index 000f73e..428d6a0 100755
--- a/gentests.py
+++ b/gentests.py
@@ -178,9 +178,17 @@ class Indexed(object):
self.disp -= (address & 0xFFFFFF)
else:
self.disp += 0xE00000-(address & 0xFFFFFF)
+ if self.disp > 127:
+ self.disp = 127
+ elif self.disp < -128:
+ self.disp = -128
address = base + index + self.disp
elif (address & 0xFFFFFF) > 0xFFFFFC:
self.disp -= (address & 0xFFFFFF) - 0xFFFFFC
+ if self.disp > 127:
+ self.disp = 127
+ elif self.disp < -128:
+ self.disp = -128
address = base + index + self.disp
if size != 'b' and address & 1:
self.disp = self.disp ^ 1
diff --git a/gst.c b/gst.c
index adf034d..a901858 100644
--- a/gst.c
+++ b/gst.c
@@ -1,6 +1,6 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "gst.h"
@@ -207,8 +207,8 @@ uint8_t z80_load_gst(z80_context * context, FILE * gstfile)
curpos += 2;
context->iff1 = context->iff2 = *curpos;
curpos += 2;
- reset = !*(curpos++);
- busreq = *curpos;
+ context->reset = !*(curpos++);
+ context->busreq = *curpos;
curpos += 3;
uint32_t bank = read_le_32(curpos);
if (bank < 0x400000) {
@@ -350,8 +350,8 @@ uint8_t z80_save_gst(z80_context * context, FILE * gstfile)
curpos += 2;
*curpos = context->iff1;
curpos += 2;
- *(curpos++) = !reset;
- *curpos = busreq;
+ *(curpos++) = !context->reset;
+ *curpos = context->busreq;
curpos += 3;
uint32_t bank = context->bank_reg << 15;
write_le_32(curpos, bank);
diff --git a/io.c b/io.c
index 4935321..3ec8672 100644
--- a/io.c
+++ b/io.c
@@ -3,15 +3,44 @@
This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <string.h>
+#include <stddef.h>
+
#include "io.h"
#include "blastem.h"
#include "render.h"
+const char * device_type_names[] = {
+ "3-button gamepad",
+ "6-button gamepad",
+ "Mega Mouse",
+ "Menacer",
+ "Justifier",
+ "Sega multi-tap",
+ "EA 4-way Play cable A",
+ "EA 4-way Play cable B",
+ "Sega Parallel Transfer Board",
+ "Generic Device",
+ "None"
+};
+
enum {
BIND_NONE,
+ BIND_UI,
BIND_GAMEPAD1,
BIND_GAMEPAD2,
- BIND_UI
+ BIND_GAMEPAD3,
+ BIND_GAMEPAD4,
+ BIND_GAMEPAD5,
+ BIND_GAMEPAD6,
+ BIND_GAMEPAD7,
+ BIND_GAMEPAD8
};
typedef enum {
@@ -26,6 +55,7 @@ typedef enum {
} ui_action;
typedef struct {
+ io_port *port;
uint8_t bind_type;
uint8_t subtype_a;
uint8_t subtype_b;
@@ -117,7 +147,7 @@ void bind_dpad(int joystick, int dpad, int direction, uint8_t bind_type, uint8_t
void bind_gamepad(int keycode, int gamepadnum, int button)
{
- if (gamepadnum < 1 || gamepadnum > 2) {
+ if (gamepadnum < 1 || gamepadnum > 8) {
return;
}
uint8_t bind_type = gamepadnum - 1 + BIND_GAMEPAD1;
@@ -126,7 +156,7 @@ void bind_gamepad(int keycode, int gamepadnum, int button)
void bind_button_gamepad(int joystick, int joybutton, int gamepadnum, int padbutton)
{
- if (gamepadnum < 1 || gamepadnum > 2) {
+ if (gamepadnum < 1 || gamepadnum > 8) {
return;
}
uint8_t bind_type = gamepadnum - 1 + BIND_GAMEPAD1;
@@ -135,7 +165,7 @@ void bind_button_gamepad(int joystick, int joybutton, int gamepadnum, int padbut
void bind_dpad_gamepad(int joystick, int dpad, uint8_t direction, int gamepadnum, int button)
{
- if (gamepadnum < 1 || gamepadnum > 2) {
+ if (gamepadnum < 1 || gamepadnum > 8) {
return;
}
uint8_t bind_type = gamepadnum - 1 + BIND_GAMEPAD1;
@@ -159,17 +189,14 @@ void bind_dpad_ui(int joystick, int dpad, uint8_t direction, ui_action action, u
void handle_binding_down(keybinding * binding)
{
- switch(binding->bind_type)
+ if (binding->bind_type >= BIND_GAMEPAD1)
{
- case BIND_GAMEPAD1:
- case BIND_GAMEPAD2:
- if (binding->subtype_a <= GAMEPAD_EXTRA) {
- genesis->ports[binding->bind_type - BIND_GAMEPAD1].input[binding->subtype_a] |= binding->value;
+ if (binding->subtype_a <= GAMEPAD_EXTRA && binding->port) {
+ binding->port->input[binding->subtype_a] |= binding->value;
}
- if (binding->subtype_b <= GAMEPAD_EXTRA) {
- genesis->ports[binding->bind_type - BIND_GAMEPAD1].input[binding->subtype_b] |= binding->value;
+ if (binding->subtype_b <= GAMEPAD_EXTRA && binding->port) {
+ binding->port->input[binding->subtype_b] |= binding->value;
}
- break;
}
}
@@ -206,11 +233,11 @@ void handle_binding_up(keybinding * binding)
{
case BIND_GAMEPAD1:
case BIND_GAMEPAD2:
- if (binding->subtype_a <= GAMEPAD_EXTRA) {
- genesis->ports[binding->bind_type - BIND_GAMEPAD1].input[binding->subtype_a] &= ~binding->value;
+ if (binding->subtype_a <= GAMEPAD_EXTRA && binding->port) {
+ binding->port->input[binding->subtype_a] &= ~binding->value;
}
- if (binding->subtype_b <= GAMEPAD_EXTRA) {
- genesis->ports[binding->bind_type - BIND_GAMEPAD1].input[binding->subtype_b] &= ~binding->value;
+ if (binding->subtype_b <= GAMEPAD_EXTRA && binding->port) {
+ binding->port->input[binding->subtype_b] &= ~binding->value;
}
break;
case BIND_UI:
@@ -447,7 +474,169 @@ void process_speeds(tern_node * cur, char * prefix)
}
}
-void set_keybindings()
+void process_device(char * device_type, io_port * port)
+{
+ port->device_type = IO_NONE;
+ if (!device_type)
+ {
+ return;
+ }
+
+ const int gamepad_len = strlen("gamepad");
+ if (!memcmp(device_type, "gamepad", gamepad_len))
+ {
+ if (
+ (device_type[gamepad_len] != '3' && device_type[gamepad_len] != '6')
+ || device_type[gamepad_len+1] != '.' || device_type[gamepad_len+2] < '1'
+ || device_type[gamepad_len+2] > '8' || device_type[gamepad_len+3] != 0
+ )
+ {
+ fprintf(stderr, "%s is not a valid gamepad type\n", device_type);
+ } else if (device_type[gamepad_len] == '3')
+ {
+ port->device_type = IO_GAMEPAD3;
+ } else {
+ port->device_type = IO_GAMEPAD6;
+ }
+ port->device.pad.gamepad_num = device_type[gamepad_len+2] - '1';
+ } else if(!strcmp(device_type, "sega_parallel")) {
+ port->device_type = IO_SEGA_PARALLEL;
+ port->device.stream.data_fd = -1;
+ port->device.stream.listen_fd = -1;
+ } else if(!strcmp(device_type, "generic")) {
+ port->device_type = IO_GENERIC;
+ port->device.stream.data_fd = -1;
+ port->device.stream.listen_fd = -1;
+ }
+}
+
+char * io_name(int i)
+{
+ switch (i)
+ {
+ case 0:
+ return "1";
+ case 1:
+ return "2";
+ case 2:
+ return "EXT";
+ default:
+ return "invalid";
+ }
+}
+
+static char * sockfile_name;
+static void cleanup_sockfile()
+{
+ unlink(sockfile_name);
+}
+
+void setup_io_devices(tern_node * config, io_port * ports)
+{
+ tern_node *io_nodes = tern_find_prefix(config, "iodevices");
+ char * io_1 = tern_find_ptr(io_nodes, "1");
+ char * io_2 = tern_find_ptr(io_nodes, "2");
+ char * io_ext = tern_find_ptr(io_nodes, "ext");
+
+ process_device(io_1, ports);
+ process_device(io_2, ports+1);
+ process_device(io_ext, ports+2);
+
+ for (int i = 0; i < 3; i++)
+ {
+
+ if (ports[i].device_type == IO_SEGA_PARALLEL)
+ {
+ char *pipe_name = tern_find_ptr(config, "ioparallel_pipe");
+ if (!pipe_name)
+ {
+ fprintf(stderr, "IO port %s is configured to use the sega parallel board, but no paralell_pipe is set!\n", io_name(i));
+ ports[i].device_type = IO_NONE;
+ } else {
+ printf("IO port: %s connected to device '%s' with pipe name: %s\n", io_name(i), device_type_names[ports[i].device_type], pipe_name);
+ if (!strcmp("stdin", pipe_name))
+ {
+ ports[i].device.stream.data_fd = STDIN_FILENO;
+ } else {
+ if (mkfifo(pipe_name, 0666) && errno != EEXIST)
+ {
+ fprintf(stderr, "Failed to create fifo %s for Sega parallel board emulation: %d %s\n", pipe_name, errno, strerror(errno));
+ ports[i].device_type = IO_NONE;
+ } else {
+ ports[i].device.stream.data_fd = open(pipe_name, O_NONBLOCK | O_RDONLY);
+ if (ports[i].device.stream.data_fd == -1)
+ {
+ fprintf(stderr, "Failed to open fifo %s for Sega parallel board emulation: %d %s\n", pipe_name, errno, strerror(errno));
+ ports[i].device_type = IO_NONE;
+ }
+ }
+ }
+ }
+ } else if (ports[i].device_type == IO_GENERIC) {
+ char *sock_name = tern_find_ptr(config, "iosocket");
+ if (!sock_name)
+ {
+ fprintf(stderr, "IO port %s is configured to use generic IO, but no socket is set!\n", io_name(i));
+ ports[i].device_type = IO_NONE;
+ } else {
+ printf("IO port: %s connected to device '%s' with socket name: %s\n", io_name(i), device_type_names[ports[i].device_type], sock_name);
+ ports[i].device.stream.data_fd = -1;
+ ports[i].device.stream.listen_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ size_t pathlen = strlen(sock_name);
+ size_t addrlen = offsetof(struct sockaddr_un, sun_path) + pathlen + 1;
+ struct sockaddr_un *saddr = malloc(addrlen);
+ saddr->sun_family = AF_UNIX;
+ memcpy(saddr->sun_path, sock_name, pathlen+1);
+ if (bind(ports[i].device.stream.listen_fd, (struct sockaddr *)saddr, addrlen))
+ {
+ fprintf(stderr, "Failed to bind socket for IO Port %s to path %s: %d %s\n", io_name(i), sock_name, errno, strerror(errno));
+ goto cleanup_sock;
+ }
+ if (listen(ports[i].device.stream.listen_fd, 1))
+ {
+ fprintf(stderr, "Failed to listen on socket for IO Port %s: %d %s\n", io_name(i), errno, strerror(errno));
+ goto cleanup_sockfile;
+ }
+ sockfile_name = sock_name;
+ atexit(cleanup_sockfile);
+ continue;
+cleanup_sockfile:
+ unlink(sock_name);
+cleanup_sock:
+ close(ports[i].device.stream.listen_fd);
+ ports[i].device_type = IO_NONE;
+ }
+ } else if (ports[i].device_type == IO_GAMEPAD3 || ports[i].device_type == IO_GAMEPAD6) {
+ printf("IO port %s connected to gamepad #%d with type '%s'\n", io_name(i), ports[i].device.pad.gamepad_num + 1, device_type_names[ports[i].device_type]);
+ } else {
+ printf("IO port %s connected to device '%s'\n", io_name(i), device_type_names[ports[i].device_type]);
+ }
+ }
+}
+
+void map_bindings(io_port *ports, keybinding *bindings, int numbindings)
+{
+ for (int i = 0; i < numbindings; i++)
+ {
+ if (bindings[i].bind_type >= BIND_GAMEPAD1)
+ {
+ int num = bindings[i].bind_type - BIND_GAMEPAD1;
+ for (int j = 0; j < 3; j++)
+ {
+ if ((ports[j].device_type == IO_GAMEPAD3
+ || ports[j].device_type ==IO_GAMEPAD6)
+ && ports[j].device.pad.gamepad_num == num
+ )
+ {
+ bindings[i].port = ports + j;
+ break;
+ }
+ }
+ }
+ }
+}
+
+void set_keybindings(io_port *ports)
{
tern_node * special = tern_insert_int(NULL, "up", RENDERKEY_UP);
special = tern_insert_int(special, "down", RENDERKEY_DOWN);
@@ -532,76 +721,245 @@ void set_keybindings()
speeds = malloc(sizeof(uint32_t));
speeds[0] = 100;
process_speeds(speed_nodes, NULL);
- for (int i = 0; i < num_speeds; i++) {
+ for (int i = 0; i < num_speeds; i++)
+ {
if (!speeds[i]) {
fprintf(stderr, "Speed index %d was not set to a valid percentage!", i);
speeds[i] = 100;
}
}
+ for (int bucket = 0; bucket < 256; bucket++)
+ {
+ if (bindings[bucket])
+ {
+ map_bindings(ports, bindings[bucket], 256);
+ }
+ }
+ for (int stick = 0; stick < MAX_JOYSTICKS; stick++)
+ {
+ if (joybindings[stick])
+ {
+ int numbuttons = render_joystick_num_buttons(stick);
+ map_bindings(ports, joybindings[stick], numbuttons);
+ }
+ if (joydpads[stick])
+ {
+ map_bindings(ports, joydpads[stick]->bindings, 4);
+ }
+ }
}
#define TH 0x40
#define TH_TIMEOUT 8000
-void io_adjust_cycles(io_port * pad, uint32_t current_cycle, uint32_t deduction)
+void io_adjust_cycles(io_port * port, uint32_t current_cycle, uint32_t deduction)
{
/*uint8_t control = pad->control | 0x80;
uint8_t th = control & pad->output;
if (pad->input[GAMEPAD_TH0] || pad->input[GAMEPAD_TH1]) {
printf("adjust_cycles | control: %X, TH: %X, GAMEPAD_TH0: %X, GAMEPAD_TH1: %X, TH Counter: %d, Timeout: %d, Cycle: %d\n", control, th, pad->input[GAMEPAD_TH0], pad->input[GAMEPAD_TH1], pad->th_counter,pad->timeout_cycle, current_cycle);
}*/
- if (current_cycle >= pad->timeout_cycle) {
- pad->th_counter = 0;
- } else {
- pad->timeout_cycle -= deduction;
+ if (port->device_type == IO_GAMEPAD6)
+ {
+ if (current_cycle >= port->device.pad.timeout_cycle)
+ {
+ port->device.pad.th_counter = 0;
+ } else {
+ port->device.pad.timeout_cycle -= deduction;
+ }
+ }
+}
+
+static void wait_for_connection(io_port * port)
+{
+ if (port->device.stream.data_fd == -1)
+ {
+ puts("Waiting for socket connection...");
+ port->device.stream.data_fd = accept(port->device.stream.listen_fd, NULL, NULL);
+ fcntl(port->device.stream.data_fd, F_SETFL, O_NONBLOCK | O_RDWR);
}
}
-void io_data_write(io_port * pad, uint8_t value, uint32_t current_cycle)
+static void service_pipe(io_port * port)
{
- if (pad->control & TH) {
- //check if TH has changed
- if ((pad->output & TH) ^ (value & TH)) {
- if (current_cycle >= pad->timeout_cycle) {
- pad->th_counter = 0;
+ uint8_t value;
+ int numRead = read(port->device.stream.data_fd, &value, sizeof(value));
+ if (numRead > 0)
+ {
+ port->input[IO_TH0] = (value & 0xF) | 0x10;
+ port->input[IO_TH1] = (value >> 4) | 0x10;
+ } else if(numRead == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
+ fprintf(stderr, "Error reading pipe for IO port: %d %s\n", errno, strerror(errno));
+ }
+}
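
service_pipe consumes one byte at a time from the FIFO configured for the Sega parallel board. A minimal sketch of a feeder process (the pipe path is an assumption; it comes from the parallel_pipe setting, not shown here):

    /* Illustrative feeder for the parallel-board FIFO polled by service_pipe.
       The low nibble of each byte is returned for TH=0 reads and the high
       nibble for TH=1 reads; io_data_read inverts both. */
    #include <stdio.h>

    int main(void)
    {
    	FILE *fifo = fopen("/tmp/parallel_pipe", "wb"); /* path is an assumption */
    	if (!fifo) {
    		return 1;
    	}
    	unsigned char value = 0x5A; /* TH=1 nibble: 0x5, TH=0 nibble: 0xA */
    	fwrite(&value, 1, 1, fifo);
    	fclose(fifo);
    	return 0;
    }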
+
+static void service_socket(io_port *port)
+{
+ uint8_t buf[32];
+ uint8_t blocking = 0;
+ int numRead = 0;
+ while (numRead <= 0)
+ {
+ numRead = recv(port->device.stream.data_fd, buf, sizeof(buf), 0);
+ if (numRead > 0)
+ {
+ port->input[IO_TH0] = buf[numRead-1];
+ if (port->input[IO_STATE] == IO_READ_PENDING)
+ {
+ port->input[IO_STATE] = IO_READ;
+ if (blocking)
+ {
+ //pending read satisfied, back to non-blocking mode
+ fcntl(port->device.stream.data_fd, F_SETFL, O_RDWR | O_NONBLOCK);
+ }
+ } else if (port->input[IO_STATE] == IO_WRITTEN) {
+ port->input[IO_STATE] = IO_READ;
}
- if (!(value & TH)) {
- pad->th_counter++;
+ } else if (numRead == 0) {
+ port->device.stream.data_fd = -1;
+ wait_for_connection(port);
+ } else if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ fprintf(stderr, "Error reading from socket for IO port: %d %s\n", errno, strerror(errno));
+ close(port->device.stream.data_fd);
+ wait_for_connection(port);
+ } else if (port->input[IO_STATE] == IO_READ_PENDING) {
+ //clear the nonblocking flag so the next read will block
+ if (!blocking)
+ {
+ fcntl(port->device.stream.data_fd, F_SETFL, O_RDWR);
+ blocking = 1;
+ }
+ } else {
+ //no new data, but that's ok
+ break;
+ }
+ }
+
+ if (port->input[IO_STATE] == IO_WRITE_PENDING)
+ {
+ uint8_t value = port->output & port->control;
+ int written = 0;
+ blocking = 0;
+ while (written <= 0)
+ {
+ written = send(port->device.stream.data_fd, &value, sizeof(value), 0);
+ if (written > 0)
+ {
+ port->input[IO_STATE] = IO_WRITTEN;
+ if (blocking)
+ {
+ //pending write satisfied, back to non-blocking mode
+ fcntl(port->device.stream.data_fd, F_SETFL, O_RDWR | O_NONBLOCK);
+ }
+ } else if (written == 0) {
+ port->device.stream.data_fd = -1;
+ wait_for_connection(port);
+ } else if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ fprintf(stderr, "Error writing to socket for IO port: %d %s\n", errno, strerror(errno));
+ close(port->device.stream.data_fd);
+ wait_for_connection(port);
+ } else {
+ //clear the nonblocking flag so the next write will block
+ if (!blocking)
+ {
+ fcntl(port->device.stream.data_fd, F_SETFL, O_RDWR);
+ blocking = 1;
+ }
}
- pad->timeout_cycle = current_cycle + TH_TIMEOUT;
}
}
- pad->output = value;
}
-uint8_t io_data_read(io_port * pad, uint32_t current_cycle)
+void io_data_write(io_port * port, uint8_t value, uint32_t current_cycle)
{
- uint8_t control = pad->control | 0x80;
- uint8_t th = control & pad->output;
+ switch (port->device_type)
+ {
+ case IO_GAMEPAD6:
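+		//6-button pads report the extra buttons after several rapid TH transitions, so count writes
+		//that drive TH low and reset the counter once TH_TIMEOUT cycles pass without one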
+ if (port->control & TH) {
+ //check if TH has changed
+ if ((port->output & TH) ^ (value & TH)) {
+ if (current_cycle >= port->device.pad.timeout_cycle) {
+ port->device.pad.th_counter = 0;
+ }
+ if (!(value & TH)) {
+ port->device.pad.th_counter++;
+ }
+ port->device.pad.timeout_cycle = current_cycle + TH_TIMEOUT;
+ }
+ }
+ port->output = value;
+ break;
+ case IO_GENERIC:
+ wait_for_connection(port);
+ port->input[IO_STATE] = IO_WRITE_PENDING;
+ port->output = value;
+ service_socket(port);
+ break;
+ default:
+ port->output = value;
+ }
+
+}
+
+uint8_t io_data_read(io_port * port, uint32_t current_cycle)
+{
+ uint8_t control = port->control | 0x80;
+ uint8_t th = control & port->output & 0x40;
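+	//TH is bit 6 and only reflects the last written value when the pin is configured as an output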
uint8_t input;
- if (current_cycle >= pad->timeout_cycle) {
- pad->th_counter = 0;
+ switch (port->device_type)
+ {
+ case IO_GAMEPAD3:
+ {
+ input = port->input[th ? GAMEPAD_TH1 : GAMEPAD_TH0];
+ break;
}
- /*if (pad->input[GAMEPAD_TH0] || pad->input[GAMEPAD_TH1]) {
- printf("io_data_read | control: %X, TH: %X, GAMEPAD_TH0: %X, GAMEPAD_TH1: %X, TH Counter: %d, Timeout: %d, Cycle: %d\n", control, th, pad->input[GAMEPAD_TH0], pad->input[GAMEPAD_TH1], pad->th_counter,pad->timeout_cycle, context->current_cycle);
- }*/
- if (th) {
- if (pad->th_counter == 3) {
- input = pad->input[GAMEPAD_EXTRA];
- } else {
- input = pad->input[GAMEPAD_TH1];
+ case IO_GAMEPAD6:
+ {
+ if (current_cycle >= port->device.pad.timeout_cycle) {
+ port->device.pad.th_counter = 0;
}
- } else {
- if (pad->th_counter == 3) {
- input = pad->input[GAMEPAD_TH0] | 0xF;
- } else if(pad->th_counter == 4) {
- input = pad->input[GAMEPAD_TH0] & 0x30;
+ /*if (port->input[GAMEPAD_TH0] || port->input[GAMEPAD_TH1]) {
+ printf("io_data_read | control: %X, TH: %X, GAMEPAD_TH0: %X, GAMEPAD_TH1: %X, TH Counter: %d, Timeout: %d, Cycle: %d\n", control, th, port->input[GAMEPAD_TH0], port->input[GAMEPAD_TH1], port->th_counter,port->timeout_cycle, context->current_cycle);
+ }*/
+ if (th) {
+ if (port->device.pad.th_counter == 3) {
+ input = port->input[GAMEPAD_EXTRA];
+ } else {
+ input = port->input[GAMEPAD_TH1];
+ }
} else {
- input = pad->input[GAMEPAD_TH0] | 0xC;
+ if (port->device.pad.th_counter == 3) {
+ input = port->input[GAMEPAD_TH0] | 0xF;
+ } else if(port->device.pad.th_counter == 4) {
+ input = port->input[GAMEPAD_TH0] & 0x30;
+ } else {
+ input = port->input[GAMEPAD_TH0] | 0xC;
+ }
}
+ break;
+ }
+ case IO_SEGA_PARALLEL:
+ if (!th)
+ {
+ service_pipe(port);
+ }
+ input = ~port->input[th ? IO_TH1 : IO_TH0];
+ break;
+ case IO_GENERIC:
+ if (port->input[IO_TH0] & 0x80 && port->input[IO_STATE] == IO_WRITTEN)
+ {
+ //device requested a blocking read after writes
+ port->input[IO_STATE] = IO_READ_PENDING;
+ }
+ service_socket(port);
+ input = ~port->input[IO_TH0];
+ break;
+ default:
+ input = 0;
+ break;
}
- uint8_t value = ((~input) & (~control)) | (pad->output & control);
- /*if (pad->input[GAMEPAD_TH0] || pad->input[GAMEPAD_TH1]) {
+ uint8_t value = ((~input) & (~control)) | (port->output & control);
+ /*if (port->input[GAMEPAD_TH0] || port->input[GAMEPAD_TH1]) {
printf ("value: %X\n", value);
}*/
return value;
diff --git a/io.h b/io.h
index b0fd1e9..b73be67 100644
--- a/io.h
+++ b/io.h
@@ -1,18 +1,43 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#ifndef IO_H_
#define IO_H_
#include <stdint.h>
+#include "tern.h"
+
+enum {
+ IO_GAMEPAD3,
+ IO_GAMEPAD6,
+ IO_MOUSE,
+ IO_MENACER,
+ IO_JUSTIFIER,
+ IO_SEGA_MULTI,
+ IO_EA_MULTI_A,
+ IO_EA_MULTI_B,
+ IO_SEGA_PARALLEL,
+ IO_GENERIC,
+ IO_NONE
+};
typedef struct {
- uint32_t th_counter;
- uint32_t timeout_cycle;
- uint8_t output;
- uint8_t control;
- uint8_t input[3];
+ union {
+ struct {
+ uint32_t timeout_cycle;
+ uint16_t th_counter;
+ uint16_t gamepad_num;
+ } pad;
+ struct {
+ int data_fd;
+ int listen_fd;
+ } stream;
+ } device;
+ uint8_t output;
+ uint8_t control;
+ uint8_t input[3];
+ uint8_t device_type;
} io_port;
#define GAMEPAD_TH0 0
@@ -20,7 +45,19 @@ typedef struct {
#define GAMEPAD_EXTRA 2
#define GAMEPAD_NONE 0xF
-void set_keybindings();
+#define IO_TH0 0
+#define IO_TH1 1
+#define IO_STATE 2
+
+enum {
+ IO_WRITE_PENDING,
+ IO_WRITTEN,
+ IO_READ_PENDING,
+ IO_READ
+};
+
+void set_keybindings(io_port *ports);
+void setup_io_devices(tern_node * config, io_port * ports);
void io_adjust_cycles(io_port * pad, uint32_t current_cycle, uint32_t deduction);
void io_data_write(io_port * pad, uint8_t value, uint32_t current_cycle);
uint8_t io_data_read(io_port * pad, uint32_t current_cycle);
diff --git a/m68k_core.c b/m68k_core.c
new file mode 100644
index 0000000..fbd8923
--- /dev/null
+++ b/m68k_core.c
@@ -0,0 +1,924 @@
+/*
+ Copyright 2014 Michael Pavone
+ This file is part of BlastEm.
+ BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
+*/
+#include "m68k_core.h"
+#include "m68k_internal.h"
+#include "68kinst.h"
+#include "backend.h"
+#include "gen.h"
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+char disasm_buf[1024];
+
+int8_t native_reg(m68k_op_info * op, m68k_options * opts)
+{
+ if (op->addr_mode == MODE_REG) {
+ return opts->dregs[op->params.regs.pri];
+ }
+ if (op->addr_mode == MODE_AREG) {
+ return opts->aregs[op->params.regs.pri];
+ }
+ return -1;
+}
+
+size_t dreg_offset(uint8_t reg)
+{
+ return offsetof(m68k_context, dregs) + sizeof(uint32_t) * reg;
+}
+
+size_t areg_offset(uint8_t reg)
+{
+ return offsetof(m68k_context, aregs) + sizeof(uint32_t) * reg;
+}
+
+//must be called with an m68k_op_info that uses a register
+size_t reg_offset(m68k_op_info *op)
+{
+ return op->addr_mode == MODE_REG ? dreg_offset(op->params.regs.pri) : areg_offset(op->params.regs.pri);
+}
+
+void print_regs_exit(m68k_context * context)
+{
+ printf("XNZVC\n%d%d%d%d%d\n", context->flags[0], context->flags[1], context->flags[2], context->flags[3], context->flags[4]);
+ for (int i = 0; i < 8; i++) {
+ printf("d%d: %X\n", i, context->dregs[i]);
+ }
+ for (int i = 0; i < 8; i++) {
+ printf("a%d: %X\n", i, context->aregs[i]);
+ }
+ exit(0);
+}
+
+void m68k_read_size(m68k_options *opts, uint8_t size)
+{
+ switch (size)
+ {
+ case OPSIZE_BYTE:
+ call(&opts->gen.code, opts->read_8);
+ break;
+ case OPSIZE_WORD:
+ call(&opts->gen.code, opts->read_16);
+ break;
+ case OPSIZE_LONG:
+ call(&opts->gen.code, opts->read_32);
+ break;
+ }
+}
+
+void m68k_write_size(m68k_options *opts, uint8_t size)
+{
+ switch (size)
+ {
+ case OPSIZE_BYTE:
+ call(&opts->gen.code, opts->write_8);
+ break;
+ case OPSIZE_WORD:
+ call(&opts->gen.code, opts->write_16);
+ break;
+ case OPSIZE_LONG:
+ call(&opts->gen.code, opts->write_32_highfirst);
+ break;
+ }
+}
+
+void translate_m68k_lea_pea(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ int8_t dst_reg = inst->op == M68K_PEA ? opts->gen.scratch1 : native_reg(&(inst->dst), opts);
+ switch(inst->src.addr_mode)
+ {
+ case MODE_AREG_INDIRECT:
+ cycles(&opts->gen, BUS);
+ if (dst_reg >= 0) {
+ areg_to_native(opts, inst->src.params.regs.pri, dst_reg);
+ } else {
+ if (opts->aregs[inst->src.params.regs.pri] >= 0) {
+ native_to_areg(opts, opts->aregs[inst->src.params.regs.pri], inst->dst.params.regs.pri);
+ } else {
+ areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ }
+ }
+ break;
+ case MODE_AREG_DISPLACE:
+ cycles(&opts->gen, 8);
+ calc_areg_displace(opts, &inst->src, dst_reg >= 0 ? dst_reg : opts->gen.scratch1);
+ if (dst_reg < 0) {
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ }
+ break;
+ case MODE_AREG_INDEX_DISP8:
+ cycles(&opts->gen, 12);
+ if (dst_reg < 0 || inst->dst.params.regs.pri == inst->src.params.regs.pri || inst->dst.params.regs.pri == (inst->src.params.regs.sec >> 1 & 0x7)) {
+ dst_reg = opts->gen.scratch1;
+ }
+ calc_areg_index_disp8(opts, &inst->src, dst_reg);
+ if (dst_reg == opts->gen.scratch1 && inst->op != M68K_PEA) {
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ }
+ break;
+ case MODE_PC_DISPLACE:
+ cycles(&opts->gen, 8);
+ if (inst->op == M68K_PEA) {
+ ldi_native(opts, inst->src.params.regs.displacement + inst->address+2, dst_reg);
+ } else {
+ ldi_areg(opts, inst->src.params.regs.displacement + inst->address+2, inst->dst.params.regs.pri);
+ }
+ break;
+ case MODE_PC_INDEX_DISP8:
+ cycles(&opts->gen, BUS*3);
+ if (dst_reg < 0 || inst->dst.params.regs.pri == (inst->src.params.regs.sec >> 1 & 0x7)) {
+ dst_reg = opts->gen.scratch1;
+ }
+ ldi_native(opts, inst->address+2, dst_reg);
+ calc_index_disp8(opts, &inst->src, dst_reg);
+ if (dst_reg == opts->gen.scratch1 && inst->op != M68K_PEA) {
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ }
+ break;
+ case MODE_ABSOLUTE:
+ case MODE_ABSOLUTE_SHORT:
+ cycles(&opts->gen, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
+ if (inst->op == M68K_PEA) {
+ ldi_native(opts, inst->src.params.immed, dst_reg);
+ } else {
+ ldi_areg(opts, inst->src.params.immed, inst->dst.params.regs.pri);
+ }
+ break;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
+ exit(1);
+ }
+ if (inst->op == M68K_PEA) {
+ subi_areg(opts, 4, 7);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(code, opts->write_32_lowfirst);
+ }
+}
+
+void push_const(m68k_options *opts, int32_t value)
+{
+ ldi_native(opts, value, opts->gen.scratch1);
+ subi_areg(opts, 4, 7);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(&opts->gen.code, opts->write_32_highfirst);
+}
+
+void jump_m68k_abs(m68k_options * opts, uint32_t address)
+{
+ code_info *code = &opts->gen.code;
+ code_ptr dest_addr = get_native_address(opts->gen.native_code_map, address);
+ if (!dest_addr) {
+ opts->gen.deferred = defer_address(opts->gen.deferred, address, code->cur + 1);
+ //dummy address to be replaced later, make sure it generates a 4-byte displacement
+ dest_addr = code->cur + 256;
+ }
+ jmp(code, dest_addr);
+ //this used to call opts->native_addr for destinations in RAM, but that shouldn't be needed
+ //since instruction retranslation patches the original native instruction location
+}
+
+void translate_m68k_bsr(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ int32_t disp = inst->src.params.immed;
+ uint32_t after = inst->address + (inst->variant == VAR_BYTE ? 2 : 4);
+ //TODO: Add cycles in the right place relative to pushing the return address on the stack
+ cycles(&opts->gen, 10);
+ push_const(opts, after);
+ jump_m68k_abs(opts, inst->address + 2 + disp);
+}
+
+void translate_m68k_jmp_jsr(m68k_options * opts, m68kinst * inst)
+{
+ uint8_t is_jsr = inst->op == M68K_JSR;
+ code_info *code = &opts->gen.code;
+ code_ptr dest_addr;
+ uint8_t sec_reg;
+ uint32_t after;
+ uint32_t m68k_addr;
+ switch(inst->src.addr_mode)
+ {
+ case MODE_AREG_INDIRECT:
+ cycles(&opts->gen, BUS*2);
+ if (is_jsr) {
+ push_const(opts, inst->address+2);
+ }
+ areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ break;
+ case MODE_AREG_DISPLACE:
+ cycles(&opts->gen, BUS*2);
+ if (is_jsr) {
+ push_const(opts, inst->address+4);
+ }
+ calc_areg_displace(opts, &inst->src, opts->gen.scratch1);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ break;
+ case MODE_AREG_INDEX_DISP8:
+		cycles(&opts->gen, BUS*3);//TODO: Check that this is correct
+ if (is_jsr) {
+ push_const(opts, inst->address+4);
+ }
+ calc_areg_index_disp8(opts, &inst->src, opts->gen.scratch1);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ break;
+ case MODE_PC_DISPLACE:
+ //TODO: Add cycles in the right place relative to pushing the return address on the stack
+ cycles(&opts->gen, 10);
+ if (is_jsr) {
+ push_const(opts, inst->address+4);
+ }
+ jump_m68k_abs(opts, inst->src.params.regs.displacement + inst->address + 2);
+ break;
+ case MODE_PC_INDEX_DISP8:
+		cycles(&opts->gen, BUS*3);//TODO: Check that this is correct
+ if (is_jsr) {
+ push_const(opts, inst->address+4);
+ }
+ ldi_native(opts, inst->address+2, opts->gen.scratch1);
+ calc_index_disp8(opts, &inst->src, opts->gen.scratch1);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ break;
+ case MODE_ABSOLUTE:
+ case MODE_ABSOLUTE_SHORT:
+ //TODO: Add cycles in the right place relative to pushing the return address on the stack
+ cycles(&opts->gen, inst->src.addr_mode == MODE_ABSOLUTE ? 12 : 10);
+ if (is_jsr) {
+ push_const(opts, inst->address + (inst->src.addr_mode == MODE_ABSOLUTE ? 6 : 4));
+ }
+ jump_m68k_abs(opts, inst->src.params.immed);
+ break;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%s\naddress mode %d not yet supported (%s)\n", disasm_buf, inst->src.addr_mode, is_jsr ? "jsr" : "jmp");
+ exit(1);
+ }
+}
+
+void translate_m68k_unlk(m68k_options * opts, m68kinst * inst)
+{
+ cycles(&opts->gen, BUS);
+ areg_to_native(opts, inst->dst.params.regs.pri, opts->aregs[7]);
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ call(&opts->gen.code, opts->read_32);
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ addi_areg(opts, 4, 7);
+}
+
+void translate_m68k_link(m68k_options * opts, m68kinst * inst)
+{
+ //compensate for displacement word
+ cycles(&opts->gen, BUS);
+ subi_areg(opts, 4, 7);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
+ call(&opts->gen.code, opts->write_32_highfirst);
+ native_to_areg(opts, opts->aregs[7], inst->src.params.regs.pri);
+ addi_areg(opts, inst->dst.params.immed, 7);
+ //prefetch
+ cycles(&opts->gen, BUS);
+}
+
+void translate_m68k_rts(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ //TODO: Add cycles
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ addi_areg(opts, 4, 7);
+ call(code, opts->read_32);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+}
+
+void translate_m68k_rtr(m68k_options *opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ //Read saved CCR
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ call(code, opts->read_16);
+ addi_areg(opts, 2, 7);
+ call(code, opts->set_ccr);
+ //Read saved PC
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ call(code, opts->read_32);
+ addi_areg(opts, 4, 7);
+ //Get native address and jump to it
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+}
+
+void translate_m68k_trap(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ ldi_native(opts, inst->src.params.immed + VECTOR_TRAP_0, opts->gen.scratch2);
+ ldi_native(opts, inst->address+2, opts->gen.scratch1);
+ jmp(code, opts->trap);
+}
+
+void translate_m68k_move_usp(m68k_options *opts, m68kinst *inst)
+{
+ cycles(&opts->gen, BUS);
+ int8_t reg;
+ if (inst->src.addr_mode == MODE_UNUSED) {
+ reg = native_reg(&inst->dst, opts);
+ if (reg < 0) {
+ reg = opts->gen.scratch1;
+ }
+ areg_to_native(opts, 8, reg);
+ if (reg == opts->gen.scratch1) {
+ native_to_areg(opts, opts->gen.scratch1, inst->dst.params.regs.pri);
+ }
+ } else {
+ reg = native_reg(&inst->src, opts);
+ if (reg < 0) {
+ reg = opts->gen.scratch1;
+ areg_to_native(opts, inst->src.params.regs.pri, reg);
+ }
+ native_to_areg(opts, reg, 8);
+ }
+}
+
+void translate_m68k_movem(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ int8_t bit,reg,sec_reg;
+ uint8_t early_cycles;
+ if(inst->src.addr_mode == MODE_REG) {
+ //reg to mem
+ early_cycles = 8;
+ int8_t dir;
+ switch (inst->dst.addr_mode)
+ {
+ case MODE_AREG_INDIRECT:
+ case MODE_AREG_PREDEC:
+ areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
+ break;
+ case MODE_AREG_DISPLACE:
+ early_cycles += BUS;
+ calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
+ break;
+ case MODE_AREG_INDEX_DISP8:
+ early_cycles += 6;
+ calc_areg_index_disp8(opts, &inst->dst, opts->gen.scratch2);
+ break;
+ case MODE_PC_DISPLACE:
+ early_cycles += BUS;
+ ldi_native(opts, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2);
+ break;
+ case MODE_PC_INDEX_DISP8:
+ early_cycles += 6;
+ ldi_native(opts, inst->address+2, opts->gen.scratch2);
+			calc_index_disp8(opts, &inst->dst, opts->gen.scratch2);
+			break;
+ case MODE_ABSOLUTE:
+ early_cycles += 4;
+ case MODE_ABSOLUTE_SHORT:
+ early_cycles += 4;
+ ldi_native(opts, inst->dst.params.immed, opts->gen.scratch2);
+ break;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\naddress mode %d not implemented (movem dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
+ exit(1);
+ }
+ if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
+ reg = 15;
+ dir = -1;
+ } else {
+ reg = 0;
+ dir = 1;
+ }
+ cycles(&opts->gen, early_cycles);
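+		//the MOVEM register mask is bit-reversed in predecrement mode, so walk registers from a7
+		//downward while reading mask bits from bit 0 upward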
+ for(bit=0; reg < 16 && reg >= 0; reg += dir, bit++) {
+ if (inst->src.params.immed & (1 << bit)) {
+ if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
+ subi_native(opts, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2);
+ }
+ push_native(opts, opts->gen.scratch2);
+ if (reg > 7) {
+ areg_to_native(opts, reg-8, opts->gen.scratch1);
+ } else {
+ dreg_to_native(opts, reg, opts->gen.scratch1);
+ }
+ if (inst->extra.size == OPSIZE_LONG) {
+ call(code, opts->write_32_lowfirst);
+ } else {
+ call(code, opts->write_16);
+ }
+ pop_native(opts, opts->gen.scratch2);
+ if (inst->dst.addr_mode != MODE_AREG_PREDEC) {
+ addi_native(opts, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch2);
+ }
+ }
+ }
+ if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
+ native_to_areg(opts, opts->gen.scratch2, inst->dst.params.regs.pri);
+ }
+ } else {
+ //mem to reg
+ early_cycles = 4;
+ switch (inst->src.addr_mode)
+ {
+ case MODE_AREG_INDIRECT:
+ case MODE_AREG_POSTINC:
+ areg_to_native(opts, inst->src.params.regs.pri, opts->gen.scratch1);
+ break;
+ case MODE_AREG_DISPLACE:
+ early_cycles += BUS;
+ reg = opts->gen.scratch2;
+ calc_areg_displace(opts, &inst->src, opts->gen.scratch1);
+ break;
+ case MODE_AREG_INDEX_DISP8:
+ early_cycles += 6;
+ calc_areg_index_disp8(opts, &inst->src, opts->gen.scratch1);
+ break;
+ case MODE_PC_DISPLACE:
+ early_cycles += BUS;
+ ldi_native(opts, inst->src.params.regs.displacement + inst->address+2, opts->gen.scratch1);
+ break;
+ case MODE_PC_INDEX_DISP8:
+ early_cycles += 6;
+ ldi_native(opts, inst->address+2, opts->gen.scratch1);
+ calc_index_disp8(opts, &inst->src, opts->gen.scratch1);
+ break;
+ case MODE_ABSOLUTE:
+ early_cycles += 4;
+ case MODE_ABSOLUTE_SHORT:
+ early_cycles += 4;
+ ldi_native(opts, inst->src.params.immed, opts->gen.scratch1);
+ break;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\naddress mode %d not implemented (movem src)\n", inst->address, disasm_buf, inst->src.addr_mode);
+ exit(1);
+ }
+ cycles(&opts->gen, early_cycles);
+ for(reg = 0; reg < 16; reg ++) {
+ if (inst->dst.params.immed & (1 << reg)) {
+ push_native(opts, opts->gen.scratch1);
+ if (inst->extra.size == OPSIZE_LONG) {
+ call(code, opts->read_32);
+ } else {
+ call(code, opts->read_16);
+ }
+ if (inst->extra.size == OPSIZE_WORD) {
+ sign_extend16_native(opts, opts->gen.scratch1);
+ }
+ if (reg > 7) {
+ native_to_areg(opts, opts->gen.scratch1, reg-8);
+ } else {
+ native_to_dreg(opts, opts->gen.scratch1, reg);
+ }
+ pop_native(opts, opts->gen.scratch1);
+ addi_native(opts, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, opts->gen.scratch1);
+ }
+ }
+ if (inst->src.addr_mode == MODE_AREG_POSTINC) {
+ native_to_areg(opts, opts->gen.scratch1, inst->src.params.regs.pri);
+ }
+ }
+ //prefetch
+ cycles(&opts->gen, 4);
+}
+
+void translate_m68k_nop(m68k_options *opts, m68kinst *inst)
+{
+ cycles(&opts->gen, BUS);
+}
+
+void swap_ssp_usp(m68k_options * opts)
+{
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ areg_to_native(opts, 8, opts->aregs[7]);
+ native_to_areg(opts, opts->gen.scratch2, 8);
+}
+
+code_ptr get_native_address(native_map_slot * native_code_map, uint32_t address)
+{
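+	//the native code map is chunked: each slot covers NATIVE_CHUNK_SIZE 68K words and stores a base
+	//pointer plus per-word offsets; INVALID_OFFSET marks untranslated words and EXTENSION_WORD marks
+	//words in the middle of an instruction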
+ address &= 0xFFFFFF;
+ address /= 2;
+ uint32_t chunk = address / NATIVE_CHUNK_SIZE;
+ if (!native_code_map[chunk].base) {
+ return NULL;
+ }
+ uint32_t offset = address % NATIVE_CHUNK_SIZE;
+ if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET || native_code_map[chunk].offsets[offset] == EXTENSION_WORD) {
+ return NULL;
+ }
+ return native_code_map[chunk].base + native_code_map[chunk].offsets[offset];
+}
+
+code_ptr get_native_from_context(m68k_context * context, uint32_t address)
+{
+ return get_native_address(context->native_code_map, address);
+}
+
+uint32_t get_instruction_start(native_map_slot * native_code_map, uint32_t address)
+{
+ address &= 0xFFFFFF;
+ address /= 2;
+ uint32_t chunk = address / NATIVE_CHUNK_SIZE;
+ if (!native_code_map[chunk].base) {
+ return 0;
+ }
+ uint32_t offset = address % NATIVE_CHUNK_SIZE;
+ if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET) {
+ return 0;
+ }
+ while (native_code_map[chunk].offsets[offset] == EXTENSION_WORD) {
+ --address;
+ chunk = address / NATIVE_CHUNK_SIZE;
+ offset = address % NATIVE_CHUNK_SIZE;
+ }
+ return address*2;
+}
+
+void map_native_address(m68k_context * context, uint32_t address, code_ptr native_addr, uint8_t size, uint8_t native_size)
+{
+ native_map_slot * native_code_map = context->native_code_map;
+ m68k_options * opts = context->options;
+ address &= 0xFFFFFF;
+ if (address > 0xE00000) {
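+		//code in RAM can be overwritten, so flag the 2KB block(s) this instruction occupies and
+		//record its native size so it can be patched in place on retranslation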
+ context->ram_code_flags[(address & 0xC000) >> 14] |= 1 << ((address & 0x3800) >> 11);
+ if (((address & 0x3FFF) + size) & 0xC000) {
+ context->ram_code_flags[((address+size) & 0xC000) >> 14] |= 1 << (((address+size) & 0x3800) >> 11);
+ }
+ uint32_t slot = (address & 0xFFFF)/1024;
+ if (!opts->gen.ram_inst_sizes[slot]) {
+ opts->gen.ram_inst_sizes[slot] = malloc(sizeof(uint8_t) * 512);
+ }
+ opts->gen.ram_inst_sizes[slot][((address & 0xFFFF)/2)%512] = native_size;
+ }
+ address/= 2;
+ uint32_t chunk = address / NATIVE_CHUNK_SIZE;
+ if (!native_code_map[chunk].base) {
+ native_code_map[chunk].base = native_addr;
+ native_code_map[chunk].offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE);
+ memset(native_code_map[chunk].offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE);
+ }
+ uint32_t offset = address % NATIVE_CHUNK_SIZE;
+ native_code_map[chunk].offsets[offset] = native_addr-native_code_map[chunk].base;
+ for(address++,size-=2; size; address++,size-=2) {
+ chunk = address / NATIVE_CHUNK_SIZE;
+ offset = address % NATIVE_CHUNK_SIZE;
+ if (!native_code_map[chunk].base) {
+ native_code_map[chunk].base = native_addr;
+ native_code_map[chunk].offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE);
+ memset(native_code_map[chunk].offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE);
+ }
+ native_code_map[chunk].offsets[offset] = EXTENSION_WORD;
+ }
+}
+
+uint8_t get_native_inst_size(m68k_options * opts, uint32_t address)
+{
+ if (address < 0xE00000) {
+ return 0;
+ }
+ uint32_t slot = (address & 0xFFFF)/1024;
+ return opts->gen.ram_inst_sizes[slot][((address & 0xFFFF)/2)%512];
+}
+
+uint8_t m68k_is_terminal(m68kinst * inst)
+{
+ return inst->op == M68K_RTS || inst->op == M68K_RTE || inst->op == M68K_RTR || inst->op == M68K_JMP
+ || inst->op == M68K_TRAP || inst->op == M68K_ILLEGAL || inst->op == M68K_INVALID || inst->op == M68K_RESET
+ || (inst->op == M68K_BCC && inst->extra.cond == COND_TRUE);
+}
+
+void m68k_handle_deferred(m68k_context * context)
+{
+ m68k_options * opts = context->options;
+ process_deferred(&opts->gen.deferred, context, (native_addr_func)get_native_from_context);
+ if (opts->gen.deferred) {
+ translate_m68k_stream(opts->gen.deferred->address, context);
+ }
+}
+
+typedef enum {
+ RAW_FUNC = 1,
+ BINARY_ARITH,
+ UNARY_ARITH,
+ OP_FUNC
+} impl_type;
+
+typedef void (*raw_fun)(m68k_options * opts, m68kinst *inst);
+typedef void (*op_fun)(m68k_options * opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+
+typedef struct {
+ union {
+ raw_fun raw;
+ uint32_t flag_mask;
+ op_fun op;
+ } impl;
+ impl_type itype;
+} impl_info;
+
+#define RAW_IMPL(inst, fun) [inst] = { .impl = { .raw = fun }, .itype = RAW_FUNC }
+#define OP_IMPL(inst, fun) [inst] = { .impl = { .op = fun }, .itype = OP_FUNC }
+#define UNARY_IMPL(inst, mask) [inst] = { .impl = { .flag_mask = mask }, .itype = UNARY_ARITH }
+#define BINARY_IMPL(inst, mask) [inst] = { .impl = { .flag_mask = mask}, .itype = BINARY_ARITH }
+
+impl_info m68k_impls[] = {
+ //math
+ BINARY_IMPL(M68K_ADD, X|N|Z|V|C),
+ BINARY_IMPL(M68K_SUB, X|N|Z|V|C),
+ //z flag is special cased for ADDX/SUBX
+ BINARY_IMPL(M68K_ADDX, X|N|V|C),
+ BINARY_IMPL(M68K_SUBX, X|N|V|C),
+ OP_IMPL(M68K_ABCD, translate_m68k_abcd_sbcd),
+ OP_IMPL(M68K_SBCD, translate_m68k_abcd_sbcd),
+ BINARY_IMPL(M68K_AND, N|Z|V0|C0),
+ BINARY_IMPL(M68K_EOR, N|Z|V0|C0),
+ BINARY_IMPL(M68K_OR, N|Z|V0|C0),
+ RAW_IMPL(M68K_CMP, translate_m68k_cmp),
+ OP_IMPL(M68K_DIVS, translate_m68k_div),
+ OP_IMPL(M68K_DIVU, translate_m68k_div),
+ OP_IMPL(M68K_MULS, translate_m68k_mul),
+ OP_IMPL(M68K_MULU, translate_m68k_mul),
+ RAW_IMPL(M68K_EXT, translate_m68k_ext),
+ UNARY_IMPL(M68K_NEG, X|N|Z|V|C),
+ OP_IMPL(M68K_NEGX, translate_m68k_negx),
+ UNARY_IMPL(M68K_NOT, N|Z|V|C),
+ UNARY_IMPL(M68K_TST, N|Z|V0|C0),
+
+ //shift/rotate
+ OP_IMPL(M68K_ASL, translate_m68k_sl),
+ OP_IMPL(M68K_LSL, translate_m68k_sl),
+ OP_IMPL(M68K_ASR, translate_m68k_asr),
+ OP_IMPL(M68K_LSR, translate_m68k_lsr),
+ OP_IMPL(M68K_ROL, translate_m68k_rot),
+ OP_IMPL(M68K_ROR, translate_m68k_rot),
+ OP_IMPL(M68K_ROXL, translate_m68k_rot),
+ OP_IMPL(M68K_ROXR, translate_m68k_rot),
+ UNARY_IMPL(M68K_SWAP, N|Z|V0|C0),
+
+ //bit
+ OP_IMPL(M68K_BCHG, translate_m68k_bit),
+ OP_IMPL(M68K_BCLR, translate_m68k_bit),
+ OP_IMPL(M68K_BSET, translate_m68k_bit),
+ OP_IMPL(M68K_BTST, translate_m68k_bit),
+
+ //data movement
+ RAW_IMPL(M68K_MOVE, translate_m68k_move),
+ RAW_IMPL(M68K_MOVEM, translate_m68k_movem),
+ RAW_IMPL(M68K_MOVEP, translate_m68k_movep),
+ RAW_IMPL(M68K_MOVE_USP, translate_m68k_move_usp),
+ RAW_IMPL(M68K_LEA, translate_m68k_lea_pea),
+ RAW_IMPL(M68K_PEA, translate_m68k_lea_pea),
+ RAW_IMPL(M68K_CLR, translate_m68k_clr),
+ OP_IMPL(M68K_EXG, translate_m68k_exg),
+ RAW_IMPL(M68K_SCC, translate_m68k_scc),
+
+ //function calls and branches
+ RAW_IMPL(M68K_BCC, translate_m68k_bcc),
+ RAW_IMPL(M68K_BSR, translate_m68k_bsr),
+ RAW_IMPL(M68K_DBCC, translate_m68k_dbcc),
+ RAW_IMPL(M68K_JMP, translate_m68k_jmp_jsr),
+ RAW_IMPL(M68K_JSR, translate_m68k_jmp_jsr),
+ RAW_IMPL(M68K_RTS, translate_m68k_rts),
+ RAW_IMPL(M68K_RTE, translate_m68k_rte),
+ RAW_IMPL(M68K_RTR, translate_m68k_rtr),
+ RAW_IMPL(M68K_LINK, translate_m68k_link),
+ RAW_IMPL(M68K_UNLK, translate_m68k_unlk),
+
+ //SR/CCR stuff
+ RAW_IMPL(M68K_ANDI_CCR, translate_m68k_andi_ori_ccr_sr),
+ RAW_IMPL(M68K_ANDI_SR, translate_m68k_andi_ori_ccr_sr),
+ RAW_IMPL(M68K_EORI_CCR, translate_m68k_eori_ccr_sr),
+ RAW_IMPL(M68K_EORI_SR, translate_m68k_eori_ccr_sr),
+ RAW_IMPL(M68K_ORI_CCR, translate_m68k_andi_ori_ccr_sr),
+ RAW_IMPL(M68K_ORI_SR, translate_m68k_andi_ori_ccr_sr),
+ OP_IMPL(M68K_MOVE_CCR, translate_m68k_move_ccr_sr),
+ OP_IMPL(M68K_MOVE_SR, translate_m68k_move_ccr_sr),
+ OP_IMPL(M68K_MOVE_FROM_SR, translate_m68k_move_from_sr),
+ RAW_IMPL(M68K_STOP, translate_m68k_stop),
+
+ //traps
+ OP_IMPL(M68K_CHK, translate_m68k_chk),
+ RAW_IMPL(M68K_TRAP, translate_m68k_trap),
+ RAW_IMPL(M68K_ILLEGAL, translate_m68k_illegal),
+ RAW_IMPL(M68K_INVALID, translate_m68k_invalid),
+
+ //misc
+ RAW_IMPL(M68K_NOP, translate_m68k_nop),
+ RAW_IMPL(M68K_RESET, translate_m68k_reset),
+
+ //currently unimplemented
+ //M68K_NBCD
+ //M68K_TAS
+ //M68K_TRAPV
+};
+
+void translate_m68k(m68k_options * opts, m68kinst * inst)
+{
+ check_cycles_int(&opts->gen, inst->address);
+ impl_info * info = m68k_impls + inst->op;
+ if (info->itype == RAW_FUNC) {
+ info->impl.raw(opts, inst);
+ return;
+ }
+
+ host_ea src_op, dst_op;
+ if (inst->src.addr_mode != MODE_UNUSED) {
+ translate_m68k_op(inst, &src_op, opts, 0);
+ }
+ if (inst->dst.addr_mode != MODE_UNUSED) {
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ }
+ if (info->itype == OP_FUNC) {
+ info->impl.op(opts, inst, &src_op, &dst_op);
+ } else if (info->itype == BINARY_ARITH) {
+ translate_m68k_arith(opts, inst, info->impl.flag_mask, &src_op, &dst_op);
+ } else if (info->itype == UNARY_ARITH) {
+ translate_m68k_unary(opts, inst, info->impl.flag_mask, inst->dst.addr_mode != MODE_UNUSED ? &dst_op : &src_op);
+ } else {
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\ninstruction %d not yet implemented\n", inst->address, disasm_buf, inst->op);
+ exit(1);
+ }
+}
+
+void translate_m68k_stream(uint32_t address, m68k_context * context)
+{
+ m68kinst instbuf;
+ m68k_options * opts = context->options;
+ code_info *code = &opts->gen.code;
+ if(get_native_address(opts->gen.native_code_map, address)) {
+ return;
+ }
+ uint16_t *encoded, *next;
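+	//the outer loop walks deferred branch targets; the inner loop translates instructions until it
+	//reaches one that ends the block (rts, jmp, an unconditional branch, etc.)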
+ do {
+ if (opts->address_log) {
+ fprintf(opts->address_log, "%X\n", address);
+ fflush(opts->address_log);
+ }
+ do {
+ encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
+ if (!encoded) {
+ map_native_address(context, address, code->cur, 2, 1);
+ translate_out_of_bounds(code);
+ break;
+ }
+ code_ptr existing = get_native_address(opts->gen.native_code_map, address);
+ if (existing) {
+ jmp(code, existing);
+ break;
+ }
+ next = m68k_decode(encoded, &instbuf, address);
+ if (instbuf.op == M68K_INVALID) {
+ instbuf.src.params.immed = *encoded;
+ }
+ uint16_t m68k_size = (next-encoded)*2;
+ address += m68k_size;
+ //char disbuf[1024];
+ //m68k_disasm(&instbuf, disbuf);
+ //printf("%X: %s\n", instbuf.address, disbuf);
+
+ //make sure the beginning of the code for an instruction is contiguous
+ check_code_prologue(code);
+ code_ptr start = code->cur;
+ translate_m68k(opts, &instbuf);
+ code_ptr after = code->cur;
+ map_native_address(context, instbuf.address, start, m68k_size, after-start);
+ } while(!m68k_is_terminal(&instbuf));
+ process_deferred(&opts->gen.deferred, context, (native_addr_func)get_native_from_context);
+ if (opts->gen.deferred) {
+ address = opts->gen.deferred->address;
+ }
+ } while(opts->gen.deferred);
+}
+
+void * m68k_retranslate_inst(uint32_t address, m68k_context * context)
+{
+ m68k_options * opts = context->options;
+ code_info *code = &opts->gen.code;
+ uint8_t orig_size = get_native_inst_size(opts, address);
+ code_ptr orig_start = get_native_address(context->native_code_map, address);
+ uint32_t orig = address;
+ code_info orig_code;
+ orig_code.cur = orig_start;
+ orig_code.last = orig_start + orig_size + 5;
+ uint16_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
+ m68kinst instbuf;
+ after = m68k_decode(inst, &instbuf, orig);
+ if (orig_size != MAX_NATIVE_SIZE) {
+ deferred_addr * orig_deferred = opts->gen.deferred;
+
+ //make sure we have enough code space for the max size instruction
+ check_alloc_code(code, MAX_NATIVE_SIZE);
+ code_ptr native_start = code->cur;
+ translate_m68k(opts, &instbuf);
+ code_ptr native_end = code->cur;
+ /*uint8_t is_terminal = m68k_is_terminal(&instbuf);
+ if ((native_end - native_start) <= orig_size) {
+ code_ptr native_next;
+ if (!is_terminal) {
+ native_next = get_native_address(context->native_code_map, orig + (after-inst)*2);
+ }
+ if (is_terminal || (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - native_start)) > 5))) {
+ printf("Using original location: %p\n", orig_code.cur);
+ remove_deferred_until(&opts->gen.deferred, orig_deferred);
+ code_info tmp;
+ tmp.cur = code->cur;
+ tmp.last = code->last;
+ code->cur = orig_code.cur;
+ code->last = orig_code.last;
+ translate_m68k(opts, &instbuf);
+ native_end = orig_code.cur = code->cur;
+ code->cur = tmp.cur;
+ code->last = tmp.last;
+ if (!is_terminal) {
+ nop_fill_or_jmp_next(&orig_code, orig_start + orig_size, native_next);
+ }
+ m68k_handle_deferred(context);
+ return orig_start;
+ }
+ }*/
+
+ map_native_address(context, instbuf.address, native_start, (after-inst)*2, MAX_NATIVE_SIZE);
+
+ jmp(&orig_code, native_start);
+ if (!m68k_is_terminal(&instbuf)) {
+ code_ptr native_end = code->cur;
+ code->cur = native_start + MAX_NATIVE_SIZE;
+ code_ptr rest = get_native_address_trans(context, orig + (after-inst)*2);
+ code_ptr tmp = code->cur;
+ code->cur = native_end;
+ jmp(code, rest);
+ code->cur = tmp;
+ } else {
+ code->cur = native_start + MAX_NATIVE_SIZE;
+ }
+ m68k_handle_deferred(context);
+ return native_start;
+ } else {
+ code_info tmp = *code;
+ *code = orig_code;
+ translate_m68k(opts, &instbuf);
+ orig_code = *code;
+ *code = tmp;
+ if (!m68k_is_terminal(&instbuf)) {
+ jmp(&orig_code, get_native_address_trans(context, orig + (after-inst)*2));
+ }
+ m68k_handle_deferred(context);
+ return orig_start;
+ }
+}
+
+code_ptr get_native_address_trans(m68k_context * context, uint32_t address)
+{
+ address &= 0xFFFFFF;
+ code_ptr ret = get_native_address(context->native_code_map, address);
+ if (!ret) {
+ translate_m68k_stream(address, context);
+ ret = get_native_address(context->native_code_map, address);
+ }
+ return ret;
+}
+
+void remove_breakpoint(m68k_context * context, uint32_t address)
+{
+ code_ptr native = get_native_address(context->native_code_map, address);
+ code_info tmp = context->options->gen.code;
+ context->options->gen.code.cur = native;
+ context->options->gen.code.last = native + 16;
+ check_cycles_int(&context->options->gen, address);
+ context->options->gen.code = tmp;
+}
+
+void start_68k_context(m68k_context * context, uint32_t address)
+{
+ code_ptr addr = get_native_address_trans(context, address);
+ m68k_options * options = context->options;
+ options->start_context(addr, context);
+}
+
+void m68k_reset(m68k_context * context)
+{
+ //TODO: Actually execute the M68K reset vector rather than simulating some of its behavior
+ uint16_t *reset_vec = get_native_pointer(0, (void **)context->mem_pointers, &context->options->gen);
+ context->aregs[7] = reset_vec[0] << 16 | reset_vec[1];
+ uint32_t address = reset_vec[2] << 16 | reset_vec[3];
+ start_68k_context(context, address);
+}
+
+
+void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts)
+{
+ memset(context, 0, sizeof(m68k_context));
+ context->native_code_map = native_code_map;
+ context->options = opts;
+ context->int_cycle = 0xFFFFFFFF;
+ context->status = 0x27;
+}
diff --git a/m68k_to_x86.h b/m68k_core.h
index c4fbf22..cf43864 100644
--- a/m68k_to_x86.h
+++ b/m68k_core.h
@@ -3,8 +3,8 @@
This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
-#ifndef M68K_TO_X86_H_
-#define M68K_TO_X86_H_
+#ifndef M68K_CORE_H_
+#define M68K_CORE_H_
#include <stdint.h>
#include <stdio.h>
#include "backend.h"
@@ -41,7 +41,7 @@ typedef struct {
code_ptr get_sr;
code_ptr set_sr;
code_ptr set_ccr;
-} x86_68k_options;
+} m68k_options;
typedef struct {
uint8_t flags[5];
@@ -59,20 +59,21 @@ typedef struct {
uint16_t reserved;
native_map_slot *native_code_map;
- void *options;
+ m68k_options *options;
uint8_t ram_code_flags[32/8];
void *system;
} m68k_context;
-uint8_t * translate_m68k(uint8_t * dst, struct m68kinst * inst, x86_68k_options * opts);
-uint8_t * translate_m68k_stream(uint32_t address, m68k_context * context);
+void translate_m68k(m68k_options * opts, struct m68kinst * inst);
+void translate_m68k_stream(uint32_t address, m68k_context * context);
void start_68k_context(m68k_context * context, uint32_t address);
-void init_x86_68k_opts(x86_68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks);
+void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks, uint32_t clock_divider);
void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts);
void m68k_reset(m68k_context * context);
void insert_breakpoint(m68k_context * context, uint32_t address, uint8_t * bp_handler);
void remove_breakpoint(m68k_context * context, uint32_t address);
m68k_context * m68k_handle_code_write(uint32_t address, m68k_context * context);
+uint32_t get_instruction_start(native_map_slot * native_code_map, uint32_t address);
-#endif //M68K_TO_X86_H_
+#endif //M68K_CORE_H_
diff --git a/m68k_core_x86.c b/m68k_core_x86.c
new file mode 100644
index 0000000..f8a323d
--- /dev/null
+++ b/m68k_core_x86.c
@@ -0,0 +1,2519 @@
+/*
+ Copyright 2013 Michael Pavone
+ This file is part of BlastEm.
+ BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
+*/
+#include "gen_x86.h"
+#include "m68k_core.h"
+#include "m68k_internal.h"
+#include "68kinst.h"
+#include "mem.h"
+#include "backend.h"
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+enum {
+ FLAG_X,
+ FLAG_N,
+ FLAG_Z,
+ FLAG_V,
+ FLAG_C
+};
+
+void set_flag(m68k_options * opts, uint8_t val, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ mov_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ mov_irdisp(&opts->gen.code, val, opts->gen.context_reg, offset, SZ_B);
+ } else {
+ mov_irind(&opts->gen.code, val, opts->gen.context_reg, SZ_B);
+ }
+ }
+}
+
+void set_flag_cond(m68k_options *opts, uint8_t cond, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ setcc_r(&opts->gen.code, cond, opts->flag_regs[flag]);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ setcc_rdisp(&opts->gen.code, cond, opts->gen.context_reg, offset);
+ } else {
+ setcc_rind(&opts->gen.code, cond, opts->gen.context_reg);
+ }
+ }
+}
+
+void check_flag(m68k_options *opts, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ cmp_ir(&opts->gen.code, 0, opts->flag_regs[flag], SZ_B);
+ } else {
+ cmp_irdisp(&opts->gen.code, 0, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
+ }
+}
+
+void flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ mov_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ mov_rdispr(&opts->gen.code, opts->gen.context_reg, offset, reg, SZ_B);
+ } else {
+ mov_rindr(&opts->gen.code, opts->gen.context_reg, reg, SZ_B);
+ }
+ }
+}
+
+void reg_to_flag(m68k_options *opts, uint8_t reg, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ mov_rr(&opts->gen.code, reg, opts->flag_regs[flag], SZ_B);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ mov_rrdisp(&opts->gen.code, reg, opts->gen.context_reg, offset, SZ_B);
+ } else {
+ mov_rrind(&opts->gen.code, reg, opts->gen.context_reg, SZ_B);
+ }
+ }
+}
+
+void flag_to_flag(m68k_options *opts, uint8_t flag1, uint8_t flag2)
+{
+ code_info *code = &opts->gen.code;
+ if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
+ mov_rr(code, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
+ } else if(opts->flag_regs[flag1] >= 0) {
+ mov_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
+ } else if (opts->flag_regs[flag2] >= 0) {
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->flag_regs[flag2], SZ_B);
+ } else {
+ push_r(code, opts->gen.scratch1);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
+ pop_r(code, opts->gen.scratch1);
+ }
+}
+
+void update_flags(m68k_options *opts, uint32_t update_mask)
+{
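+	//each 68K flag gets three bits in the mask: force to 0, force to 1, or set from the host condition
+	//codes; X has no x86 counterpart, so it is copied from C (or taken straight from the host carry)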
+ uint8_t native_flags[] = {0, CC_S, CC_Z, CC_O, CC_C};
+ for (int8_t flag = FLAG_C; flag >= FLAG_X; --flag)
+ {
+ if (update_mask & X0 << (flag*3)) {
+ set_flag(opts, 0, flag);
+ } else if(update_mask & X1 << (flag*3)) {
+ set_flag(opts, 1, flag);
+ } else if(update_mask & X << (flag*3)) {
+ if (flag == FLAG_X) {
+ if (opts->flag_regs[FLAG_C] >= 0 || !(update_mask & (C0|C1|C))) {
+ flag_to_flag(opts, FLAG_C, FLAG_X);
+ } else if(update_mask & C0) {
+ set_flag(opts, 0, flag);
+ } else if(update_mask & C1) {
+ set_flag(opts, 1, flag);
+ } else {
+ set_flag_cond(opts, CC_C, flag);
+ }
+ } else {
+ set_flag_cond(opts, native_flags[flag], flag);
+ }
+ }
+ }
+}
+
+void flag_to_carry(m68k_options * opts, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ bt_ir(&opts->gen.code, 0, opts->flag_regs[flag], SZ_B);
+ } else {
+ bt_irdisp(&opts->gen.code, 0, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
+ }
+}
+
+void or_flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ or_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
+ } else {
+ or_rdispr(&opts->gen.code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, reg, SZ_B);
+ }
+}
+
+void xor_flag_to_reg(m68k_options *opts, uint8_t flag, uint8_t reg)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ xor_rr(&opts->gen.code, opts->flag_regs[flag], reg, SZ_B);
+ } else {
+ xor_rdispr(&opts->gen.code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, reg, SZ_B);
+ }
+}
+
+void xor_flag(m68k_options *opts, uint8_t val, uint8_t flag)
+{
+ if (opts->flag_regs[flag] >= 0) {
+ xor_ir(&opts->gen.code, val, opts->flag_regs[flag], SZ_B);
+ } else {
+ xor_irdisp(&opts->gen.code, val, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, SZ_B);
+ }
+}
+
+void cmp_flags(m68k_options *opts, uint8_t flag1, uint8_t flag2)
+{
+ code_info *code = &opts->gen.code;
+ if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
+ cmp_rr(code, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
+ } else if(opts->flag_regs[flag1] >= 0 || opts->flag_regs[flag2] >= 0) {
+ if (opts->flag_regs[flag2] >= 0) {
+ uint8_t tmp = flag1;
+ flag1 = flag2;
+ flag2 = tmp;
+ }
+ cmp_rrdisp(code, opts->flag_regs[flag1], opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
+ } else {
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag1, opts->gen.scratch1, SZ_B);
+ cmp_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, flags) + flag2, SZ_B);
+ }
+}
+
+void areg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ mov_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_D);
+ } else {
+ mov_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_D);
+ }
+}
+
+void dreg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->dregs[reg] >= 0) {
+ mov_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_D);
+ } else {
+ mov_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_D);
+ }
+}
+
+void areg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ movsx_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_W, SZ_D);
+ } else {
+ movsx_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_W, SZ_D);
+ }
+}
+
+void dreg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->dregs[reg] >= 0) {
+ movsx_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_W, SZ_D);
+ } else {
+ movsx_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_W, SZ_D);
+ }
+}
+
+void native_to_areg(m68k_options *opts, uint8_t native_reg, uint8_t reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ mov_rr(&opts->gen.code, native_reg, opts->aregs[reg], SZ_D);
+ } else {
+ mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, areg_offset(reg), SZ_D);
+ }
+}
+
+void native_to_dreg(m68k_options *opts, uint8_t native_reg, uint8_t reg)
+{
+ if (opts->dregs[reg] >= 0) {
+ mov_rr(&opts->gen.code, native_reg, opts->dregs[reg], SZ_D);
+ } else {
+ mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, dreg_offset(reg), SZ_D);
+ }
+}
+
+void ldi_areg(m68k_options *opts, int32_t value, uint8_t reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ mov_ir(&opts->gen.code, value, opts->aregs[reg], SZ_D);
+ } else {
+ mov_irdisp(&opts->gen.code, value, opts->gen.context_reg, areg_offset(reg), SZ_D);
+ }
+}
+
+void ldi_native(m68k_options *opts, int32_t value, uint8_t reg)
+{
+ mov_ir(&opts->gen.code, value, reg, SZ_D);
+}
+
+void addi_native(m68k_options *opts, int32_t value, uint8_t reg)
+{
+ add_ir(&opts->gen.code, value, reg, SZ_D);
+}
+
+void subi_native(m68k_options *opts, int32_t value, uint8_t reg)
+{
+ sub_ir(&opts->gen.code, value, reg, SZ_D);
+}
+
+void push_native(m68k_options *opts, uint8_t reg)
+{
+ push_r(&opts->gen.code, reg);
+}
+
+void pop_native(m68k_options *opts, uint8_t reg)
+{
+ pop_r(&opts->gen.code, reg);
+}
+
+void sign_extend16_native(m68k_options *opts, uint8_t reg)
+{
+ movsx_rr(&opts->gen.code, reg, reg, SZ_W, SZ_D);
+}
+
+void addi_areg(m68k_options *opts, int32_t val, uint8_t reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ add_ir(&opts->gen.code, val, opts->aregs[reg], SZ_D);
+ } else {
+ add_irdisp(&opts->gen.code, val, opts->gen.context_reg, areg_offset(reg), SZ_D);
+ }
+}
+
+void subi_areg(m68k_options *opts, int32_t val, uint8_t reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ sub_ir(&opts->gen.code, val, opts->aregs[reg], SZ_D);
+ } else {
+ sub_irdisp(&opts->gen.code, val, opts->gen.context_reg, areg_offset(reg), SZ_D);
+ }
+}
+
+void add_areg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->aregs[reg] >= 0) {
+ add_rr(&opts->gen.code, opts->aregs[reg], native_reg, SZ_D);
+ } else {
+ add_rdispr(&opts->gen.code, opts->gen.context_reg, areg_offset(reg), native_reg, SZ_D);
+ }
+}
+
+void add_dreg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg)
+{
+ if (opts->dregs[reg] >= 0) {
+ add_rr(&opts->gen.code, opts->dregs[reg], native_reg, SZ_D);
+ } else {
+ add_rdispr(&opts->gen.code, opts->gen.context_reg, dreg_offset(reg), native_reg, SZ_D);
+ }
+}
+
+void calc_areg_displace(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
+{
+ areg_to_native(opts, op->params.regs.pri, native_reg);
+ add_ir(&opts->gen.code, op->params.regs.displacement, native_reg, SZ_D);
+}
+
+void calc_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
+{
+ uint8_t sec_reg = (op->params.regs.sec >> 1) & 0x7;
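+	//sec packs the index extension word: bit 0 = index size (1 = long), bits 1-3 = register number,
+	//bit 4 = address (1) vs data (0) register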
+ if (op->params.regs.sec & 1) {
+ if (op->params.regs.sec & 0x10) {
+ add_areg_native(opts, sec_reg, native_reg);
+ } else {
+ add_dreg_native(opts, sec_reg, native_reg);
+ }
+ } else {
+ uint8_t other_reg = native_reg == opts->gen.scratch1 ? opts->gen.scratch2 : opts->gen.scratch1;
+ if (op->params.regs.sec & 0x10) {
+ areg_to_native_sx(opts, sec_reg, other_reg);
+ } else {
+ dreg_to_native_sx(opts, sec_reg, other_reg);
+ }
+ add_rr(&opts->gen.code, other_reg, native_reg, SZ_D);
+ }
+ if (op->params.regs.displacement) {
+ add_ir(&opts->gen.code, op->params.regs.displacement, native_reg, SZ_D);
+ }
+}
+
+void calc_areg_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg)
+{
+ areg_to_native(opts, op->params.regs.pri, native_reg);
+ calc_index_disp8(opts, op, native_reg);
+}
+
+void translate_m68k_op(m68kinst * inst, host_ea * ea, m68k_options * opts, uint8_t dst)
+{
+ code_info *code = &opts->gen.code;
+ m68k_op_info *op = dst ? &inst->dst : &inst->src;
+ int8_t reg = native_reg(op, opts);
+ uint8_t sec_reg;
+ int32_t dec_amount, inc_amount;
+ if (reg >= 0) {
+ ea->mode = MODE_REG_DIRECT;
+ if (!dst && inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
+ movsx_rr(code, reg, opts->gen.scratch1, SZ_W, SZ_D);
+ ea->base = opts->gen.scratch1;
+ } else {
+ ea->base = reg;
+ }
+ return;
+ }
+ switch (op->addr_mode)
+ {
+ case MODE_REG:
+ case MODE_AREG:
+ //We only get one memory parameter, so if the dst operand is a register in memory,
+ //we need to copy this to a temp register first if we're translating the src operand
+ if (dst || native_reg(&(inst->dst), opts) >= 0 || inst->dst.addr_mode == MODE_UNUSED || !(inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG)
+ || inst->op == M68K_EXG) {
+
+ ea->mode = MODE_REG_DISPLACE8;
+ ea->base = opts->gen.context_reg;
+ ea->disp = reg_offset(op);
+ } else {
+ if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
+ movsx_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, SZ_W, SZ_D);
+ } else {
+ mov_rdispr(code, opts->gen.context_reg, reg_offset(op), opts->gen.scratch1, inst->extra.size);
+ }
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ //we're explicitly handling the areg dest here, so we exit immediately
+ return;
+ }
+ break;
+ case MODE_AREG_PREDEC:
+ if (dst && inst->src.addr_mode == MODE_AREG_PREDEC) {
+ push_r(code, opts->gen.scratch1);
+ }
+ dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 :1));
+ if (!dst) {
+ cycles(&opts->gen, PREDEC_PENALTY);
+ }
+ subi_areg(opts, dec_amount, op->params.regs.pri);
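+		//fall through to the shared indirect/postincrement handling below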
+ case MODE_AREG_INDIRECT:
+ case MODE_AREG_POSTINC:
+ areg_to_native(opts, op->params.regs.pri, opts->gen.scratch1);
+ m68k_read_size(opts, inst->extra.size);
+
+ if (dst) {
+ if (inst->src.addr_mode == MODE_AREG_PREDEC) {
+ //restore src operand to opts->gen.scratch2
+ pop_r(code, opts->gen.scratch2);
+ } else {
+ //save reg value in opts->gen.scratch2 so we can use it to save the result in memory later
+ areg_to_native(opts, op->params.regs.pri, opts->gen.scratch2);
+ }
+ }
+
+ if (op->addr_mode == MODE_AREG_POSTINC) {
+ inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (op->params.regs.pri == 7 ? 2 : 1));
+ addi_areg(opts, inc_amount, op->params.regs.pri);
+ }
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = (!dst && inst->dst.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) ? opts->gen.scratch2 : opts->gen.scratch1;
+ break;
+ case MODE_AREG_DISPLACE:
+ cycles(&opts->gen, BUS);
+ calc_areg_displace(opts, op, opts->gen.scratch1);
+ if (dst) {
+ push_r(code, opts->gen.scratch1);
+ }
+ m68k_read_size(opts, inst->extra.size);
+ if (dst) {
+ pop_r(code, opts->gen.scratch2);
+ }
+
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ break;
+ case MODE_AREG_INDEX_DISP8:
+ cycles(&opts->gen, 6);
+ calc_areg_index_disp8(opts, op, opts->gen.scratch1);
+ if (dst) {
+ push_r(code, opts->gen.scratch1);
+ }
+ m68k_read_size(opts, inst->extra.size);
+ if (dst) {
+ pop_r(code, opts->gen.scratch2);
+ }
+
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ break;
+ case MODE_PC_DISPLACE:
+ cycles(&opts->gen, BUS);
+ mov_ir(code, op->params.regs.displacement + inst->address+2, opts->gen.scratch1, SZ_D);
+ if (dst) {
+ push_r(code, opts->gen.scratch1);
+ }
+ m68k_read_size(opts, inst->extra.size);
+ if (dst) {
+ pop_r(code, opts->gen.scratch2);
+ }
+
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ break;
+ case MODE_PC_INDEX_DISP8:
+ cycles(&opts->gen, 6);
+ mov_ir(code, inst->address+2, opts->gen.scratch1, SZ_D);
+ calc_index_disp8(opts, op, opts->gen.scratch1);
+ if (dst) {
+ push_r(code, opts->gen.scratch1);
+ }
+ m68k_read_size(opts, inst->extra.size);
+ if (dst) {
+ pop_r(code, opts->gen.scratch2);
+ }
+
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ break;
+ case MODE_ABSOLUTE:
+ case MODE_ABSOLUTE_SHORT:
+ cycles(&opts->gen, op->addr_mode == MODE_ABSOLUTE ? BUS*2 : BUS);
+ mov_ir(code, op->params.immed, opts->gen.scratch1, SZ_D);
+ if (dst) {
+ push_r(code, opts->gen.scratch1);
+ }
+ m68k_read_size(opts, inst->extra.size);
+ if (dst) {
+ pop_r(code, opts->gen.scratch2);
+ }
+
+ ea->mode = MODE_REG_DIRECT;
+ ea->base = opts->gen.scratch1;
+ break;
+ case MODE_IMMEDIATE:
+ case MODE_IMMEDIATE_WORD:
+ if (inst->variant != VAR_QUICK) {
+ cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG && op->addr_mode == MODE_IMMEDIATE) ? BUS*2 : BUS);
+ }
+ ea->mode = MODE_IMMED;
+ ea->disp = op->params.immed;
+ //sign extend value when the destination is an address register
+ if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD && ea->disp & 0x8000) {
+ ea->disp |= 0xFFFF0000;
+ }
+ return;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\naddress mode %d not implemented (%s)\n", inst->address, disasm_buf, op->addr_mode, dst ? "dst" : "src");
+ exit(1);
+ }
+ if (!dst && inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
+ if (ea->mode == MODE_REG_DIRECT) {
+ movsx_rr(code, ea->base, opts->gen.scratch1, SZ_W, SZ_D);
+ } else {
+ movsx_rdispr(code, ea->base, ea->disp, opts->gen.scratch1, SZ_W, SZ_D);
+ ea->mode = MODE_REG_DIRECT;
+ }
+ ea->base = opts->gen.scratch1;
+ }
+}
+
+void m68k_save_result(m68kinst * inst, m68k_options * opts)
+{
+ code_info *code = &opts->gen.code;
+ if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG && inst->dst.addr_mode != MODE_UNUSED) {
+ if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) {
+ areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
+ }
+ switch (inst->extra.size)
+ {
+ case OPSIZE_BYTE:
+ call(code, opts->write_8);
+ break;
+ case OPSIZE_WORD:
+ call(code, opts->write_16);
+ break;
+ case OPSIZE_LONG:
+ call(code, opts->write_32_lowfirst);
+ break;
+ }
+ }
+}
+
+void translate_m68k_move(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ int8_t reg, flags_reg, sec_reg;
+ uint8_t dir = 0;
+ int32_t offset;
+ int32_t inc_amount, dec_amount;
+ host_ea src;
+ translate_m68k_op(inst, &src, opts, 0);
+ reg = native_reg(&(inst->dst), opts);
+
+ if (inst->dst.addr_mode != MODE_AREG) {
+ if (src.mode == MODE_REG_DIRECT) {
+ flags_reg = src.base;
+ } else {
+ if (reg >= 0) {
+ flags_reg = reg;
+ } else {
+ if(src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ src.mode = MODE_REG_DIRECT;
+ flags_reg = src.base = opts->gen.scratch1;
+ }
+ }
+ }
+ uint8_t size = inst->extra.size;
+ switch(inst->dst.addr_mode)
+ {
+ case MODE_AREG:
+ size = OPSIZE_LONG;
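+		//fall through: an address register destination is handled as a plain register move with long size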
+ case MODE_REG:
+ if (reg >= 0) {
+ if (src.mode == MODE_REG_DIRECT) {
+ mov_rr(code, src.base, reg, size);
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, reg, size);
+ } else {
+ mov_ir(code, src.disp, reg, size);
+ }
+ } else if(src.mode == MODE_REG_DIRECT) {
+ mov_rrdisp(code, src.base, opts->gen.context_reg, reg_offset(&(inst->dst)), size);
+ } else {
+ mov_irdisp(code, src.disp, opts->gen.context_reg, reg_offset(&(inst->dst)), size);
+ }
+ break;
+ case MODE_AREG_PREDEC:
+ dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
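+		//fall through; the predecrement itself is applied just before the destination address is fetched below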
+ case MODE_AREG_INDIRECT:
+ case MODE_AREG_POSTINC:
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
+ subi_areg(opts, dec_amount, inst->dst.params.regs.pri);
+ }
+ areg_to_native(opts, inst->dst.params.regs.pri, opts->gen.scratch2);
+ break;
+ case MODE_AREG_DISPLACE:
+ cycles(&opts->gen, BUS);
+ calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ break;
+ case MODE_AREG_INDEX_DISP8:
+ cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
+		//calc_areg_index_disp8 will clobber scratch1 when a 16-bit index is used
+ if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
+ push_r(code, opts->gen.scratch1);
+ }
+ calc_areg_index_disp8(opts, &inst->dst, opts->gen.scratch2);
+ if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
+ pop_r(code, opts->gen.scratch1);
+ }
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ break;
+ case MODE_PC_DISPLACE:
+ cycles(&opts->gen, BUS);
+ mov_ir(code, inst->dst.params.regs.displacement + inst->address+2, opts->gen.scratch2, SZ_D);
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ break;
+ case MODE_PC_INDEX_DISP8:
+ cycles(&opts->gen, 6);//TODO: Check to make sure this is correct
+ mov_ir(code, inst->address, opts->gen.scratch2, SZ_D);
+ if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
+ push_r(code, opts->gen.scratch1);
+ }
+ calc_index_disp8(opts, &inst->dst, opts->gen.scratch2);
+ if (src.base == opts->gen.scratch1 && !(inst->dst.params.regs.sec & 1)) {
+ pop_r(code, opts->gen.scratch1);
+ }
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ break;
+ case MODE_ABSOLUTE:
+ case MODE_ABSOLUTE_SHORT:
+ if (src.mode == MODE_REG_DIRECT) {
+ if (src.base != opts->gen.scratch1) {
+ mov_rr(code, src.base, opts->gen.scratch1, inst->extra.size);
+ }
+ } else if (src.mode == MODE_REG_DISPLACE8) {
+ mov_rdispr(code, src.base, src.disp, opts->gen.scratch1, inst->extra.size);
+ } else {
+ mov_ir(code, src.disp, opts->gen.scratch1, inst->extra.size);
+ }
+ if (inst->dst.addr_mode == MODE_ABSOLUTE) {
+ cycles(&opts->gen, BUS*2);
+ } else {
+ cycles(&opts->gen, BUS);
+ }
+ mov_ir(code, inst->dst.params.immed, opts->gen.scratch2, SZ_D);
+ break;
+ default:
+ m68k_disasm(inst, disasm_buf);
+ printf("%X: %s\naddress mode %d not implemented (move dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
+ exit(1);
+ }
+
+ if (inst->dst.addr_mode != MODE_AREG) {
+ cmp_ir(code, 0, flags_reg, inst->extra.size);
+ update_flags(opts, N|Z|V0|C0);
+	}
+ if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG) {
+ m68k_write_size(opts, inst->extra.size);
+ if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
+ inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
+ addi_areg(opts, inc_amount, inst->dst.params.regs.pri);
+ }
+ }
+
+ //add cycles for prefetch
+ cycles(&opts->gen, BUS);
+}
+
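+//Translates CLR: zeroes the destination and sets the flags to N=0, Z=1, V=0, C=0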
+void translate_m68k_clr(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ update_flags(opts, N0|V0|C0|Z1);
+ int8_t reg = native_reg(&(inst->dst), opts);
+ if (reg >= 0) {
+ cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 6 : 4));
+ xor_rr(code, reg, reg, inst->extra.size);
+ return;
+ }
+ host_ea dst_op;
+ //TODO: fix timing
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ if (dst_op.mode == MODE_REG_DIRECT) {
+ xor_rr(code, dst_op.base, dst_op.base, inst->extra.size);
+ } else {
+ mov_irdisp(code, 0, dst_op.base, dst_op.disp, inst->extra.size);
+ }
+ m68k_save_result(inst, opts);
+}
+
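+//Translates EXT: sign extends the low byte/word of a data register to the next larger size,
+//setting N/Z from the result and clearing V and C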
+void translate_m68k_ext(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ host_ea dst_op;
+ uint8_t dst_size = inst->extra.size;
+ inst->extra.size--;
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ if (dst_op.mode == MODE_REG_DIRECT) {
+ movsx_rr(code, dst_op.base, dst_op.base, inst->extra.size, dst_size);
+ cmp_ir(code, 0, dst_op.base, dst_size);
+ } else {
+ movsx_rdispr(code, dst_op.base, dst_op.disp, opts->gen.scratch1, inst->extra.size, dst_size);
+ cmp_ir(code, 0, opts->gen.scratch1, dst_size);
+ mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, dst_size);
+ }
+ inst->extra.size = dst_size;
+ update_flags(opts, N|V0|C0|Z);
+ //M68K EXT only operates on registers so no need for a call to save result here
+}
+
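+//Maps a 68K condition code to a host condition code, emitting whatever flag computation is
+//needed to test it; each pair of opposite conditions shares its flag check, with the first
+//case flipping the host condition to CC_Z and falling through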
+uint8_t m68k_eval_cond(m68k_options * opts, uint8_t cc)
+{
+ uint8_t cond = CC_NZ;
+ switch (cc)
+ {
+ case COND_HIGH:
+ cond = CC_Z;
+ case COND_LOW_SAME:
+ flag_to_reg(opts, FLAG_Z, opts->gen.scratch1);
+ or_flag_to_reg(opts, FLAG_C, opts->gen.scratch1);
+ break;
+ case COND_CARRY_CLR:
+ cond = CC_Z;
+ case COND_CARRY_SET:
+ check_flag(opts, FLAG_C);
+ break;
+ case COND_NOT_EQ:
+ cond = CC_Z;
+ case COND_EQ:
+ check_flag(opts, FLAG_Z);
+ break;
+ case COND_OVERF_CLR:
+ cond = CC_Z;
+ case COND_OVERF_SET:
+ check_flag(opts, FLAG_V);
+ break;
+ case COND_PLUS:
+ cond = CC_Z;
+ case COND_MINUS:
+ check_flag(opts, FLAG_N);
+ break;
+ case COND_GREATER_EQ:
+ cond = CC_Z;
+ case COND_LESS:
+ cmp_flags(opts, FLAG_N, FLAG_V);
+ break;
+ case COND_GREATER:
+ cond = CC_Z;
+ case COND_LESS_EQ:
+ flag_to_reg(opts, FLAG_V, opts->gen.scratch1);
+ xor_flag_to_reg(opts, FLAG_N, opts->gen.scratch1);
+ or_flag_to_reg(opts, FLAG_Z, opts->gen.scratch1);
+ break;
+ }
+ return cond;
+}
+
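+//Translates Bcc/BRA: an unconditional branch jumps straight to the target's native code,
+//while conditional branches emit a host jcc whose destination is patched in later if the
+//target hasn't been translated yet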
+void translate_m68k_bcc(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 10);//TODO: Adjust this for branch not taken case
+ int32_t disp = inst->src.params.immed;
+ uint32_t after = inst->address + 2;
+ if (inst->extra.cond == COND_TRUE) {
+ jump_m68k_abs(opts, after + disp);
+ } else {
+ code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
+ uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
+ if (!dest_addr) {
+ opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, code->cur + 2);
+			//dummy address to be replaced later; make sure it generates a 4-byte displacement
+ dest_addr = code->cur + 256;
+ }
+ jcc(code, cond, dest_addr);
+ }
+}
+
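+//Translates Scc: ST and SF store a constant 0xFF or 0 directly, all other conditions
+//branch on the evaluated condition to choose between storing 0 and 0xFF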
+void translate_m68k_scc(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ uint8_t cond = inst->extra.cond;
+ host_ea dst_op;
+ inst->extra.size = OPSIZE_BYTE;
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ if (cond == COND_TRUE || cond == COND_FALSE) {
+ if ((inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG) && inst->extra.cond == COND_TRUE) {
+ cycles(&opts->gen, 6);
+ } else {
+ cycles(&opts->gen, BUS);
+ }
+ if (dst_op.mode == MODE_REG_DIRECT) {
+ mov_ir(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, SZ_B);
+ } else {
+ mov_irdisp(code, cond == COND_TRUE ? 0xFF : 0, dst_op.base, dst_op.disp, SZ_B);
+ }
+ } else {
+ uint8_t cc = m68k_eval_cond(opts, cond);
+ check_alloc_code(code, 6*MAX_INST_LEN);
+ code_ptr true_off = code->cur + 1;
+ jcc(code, cc, code->cur+2);
+ cycles(&opts->gen, BUS);
+ if (dst_op.mode == MODE_REG_DIRECT) {
+ mov_ir(code, 0, dst_op.base, SZ_B);
+ } else {
+ mov_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B);
+ }
+ code_ptr end_off = code->cur+1;
+ jmp(code, code->cur+2);
+ *true_off = code->cur - (true_off+1);
+ cycles(&opts->gen, 6);
+ if (dst_op.mode == MODE_REG_DIRECT) {
+ mov_ir(code, 0xFF, dst_op.base, SZ_B);
+ } else {
+ mov_irdisp(code, 0xFF, dst_op.base, dst_op.disp, SZ_B);
+ }
+ *end_off = code->cur - (end_off+1);
+ }
+ m68k_save_result(inst, opts);
+}
+
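+//Translates DBcc: when the condition holds the loop is skipped, otherwise the low word of
+//the counter register is decremented and the branch is taken unless it reached -1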
+void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ //best case duration
+ cycles(&opts->gen, 10);
+ code_ptr skip_loc = NULL;
+	//TODO: Check if COND_TRUE is technically valid here even though
+	//it's basically a slow NOP
+ if (inst->extra.cond != COND_FALSE) {
+ uint8_t cond = m68k_eval_cond(opts, inst->extra.cond);
+ check_alloc_code(code, 6*MAX_INST_LEN);
+ skip_loc = code->cur + 1;
+ jcc(code, cond, code->cur + 2);
+ }
+ if (opts->dregs[inst->dst.params.regs.pri] >= 0) {
+ sub_ir(code, 1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
+ cmp_ir(code, -1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
+ } else {
+ sub_irdisp(code, 1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
+ cmp_irdisp(code, -1, opts->gen.context_reg, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
+ }
+ code_ptr loop_end_loc = code->cur + 1;
+ jcc(code, CC_Z, code->cur + 2);
+ uint32_t after = inst->address + 2;
+ jump_m68k_abs(opts, after + inst->src.params.immed);
+ *loop_end_loc = code->cur - (loop_end_loc+1);
+ if (skip_loc) {
+ cycles(&opts->gen, 2);
+ *skip_loc = code->cur - (skip_loc+1);
+ cycles(&opts->gen, 2);
+ } else {
+ cycles(&opts->gen, 4);
+ }
+}
+
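+//Translates MOVEP: moves a word or long between a data register and every other byte of
+//memory starting at d16(An), one 8-bit bus access at a time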
+void translate_m68k_movep(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ int8_t reg;
+ cycles(&opts->gen, BUS*2);
+ if (inst->src.addr_mode == MODE_REG) {
+ calc_areg_displace(opts, &inst->dst, opts->gen.scratch2);
+ reg = native_reg(&(inst->src), opts);
+ if (inst->extra.size == OPSIZE_LONG) {
+ if (reg >= 0) {
+ mov_rr(code, reg, opts->gen.scratch1, SZ_D);
+ shr_ir(code, 24, opts->gen.scratch1, SZ_D);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->write_8);
+ pop_r(code, opts->gen.scratch2);
+ mov_rr(code, reg, opts->gen.scratch1, SZ_D);
+ shr_ir(code, 16, opts->gen.scratch1, SZ_D);
+
+ } else {
+ mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+3, opts->gen.scratch1, SZ_B);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->write_8);
+ pop_r(code, opts->gen.scratch2);
+ mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+2, opts->gen.scratch1, SZ_B);
+ }
+ add_ir(code, 2, opts->gen.scratch2, SZ_D);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->write_8);
+ pop_r(code, opts->gen.scratch2);
+ add_ir(code, 2, opts->gen.scratch2, SZ_D);
+ }
+ if (reg >= 0) {
+ mov_rr(code, reg, opts->gen.scratch1, SZ_W);
+ shr_ir(code, 8, opts->gen.scratch1, SZ_W);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->write_8);
+ pop_r(code, opts->gen.scratch2);
+ mov_rr(code, reg, opts->gen.scratch1, SZ_W);
+ } else {
+ mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src))+1, opts->gen.scratch1, SZ_B);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->write_8);
+ pop_r(code, opts->gen.scratch2);
+ mov_rdispr(code, opts->gen.context_reg, reg_offset(&(inst->src)), opts->gen.scratch1, SZ_B);
+ }
+ add_ir(code, 2, opts->gen.scratch2, SZ_D);
+ call(code, opts->write_8);
+ } else {
+ calc_areg_displace(opts, &inst->src, opts->gen.scratch1);
+ reg = native_reg(&(inst->dst), opts);
+ if (inst->extra.size == OPSIZE_LONG) {
+ if (reg >= 0) {
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_8);
+ shl_ir(code, 24, opts->gen.scratch1, SZ_D);
+ mov_rr(code, opts->gen.scratch1, reg, SZ_D);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_8);
+ shl_ir(code, 16, opts->gen.scratch1, SZ_D);
+ or_rr(code, opts->gen.scratch1, reg, SZ_D);
+ } else {
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_8);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+3, SZ_B);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_8);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+2, SZ_B);
+ }
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ }
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_8);
+ if (reg >= 0) {
+
+ shl_ir(code, 8, opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->gen.scratch1, reg, SZ_W);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ call(code, opts->read_8);
+ mov_rr(code, opts->gen.scratch1, reg, SZ_B);
+ } else {
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst))+1, SZ_B);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ call(code, opts->read_8);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, reg_offset(&(inst->dst)), SZ_B);
+ }
+ }
+}
+
+typedef void (*shift_ir_t)(code_info *code, uint8_t val, uint8_t dst, uint8_t size);
+typedef void (*shift_irdisp_t)(code_info *code, uint8_t val, uint8_t dst_base, int32_t disp, uint8_t size);
+typedef void (*shift_clr_t)(code_info *code, uint8_t dst, uint8_t size);
+typedef void (*shift_clrdisp_t)(code_info *code, uint8_t dst_base, int32_t disp, uint8_t size);
+
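+//Common translation for ASL/LSL/ASR/LSR. shift_ir/shift_irdisp emit the x86 shift by an
+//immediate count, shift_clr/shift_clrdisp shift by CL, and 'special', when provided, is the
+//opposite-direction shift used to recover the carry bit for a logical shift of a long
+//operand by exactly 32 (68K counts go up to 63, while x86 reduces them modulo 32)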
+void translate_shift(m68k_options * opts, m68kinst * inst, host_ea *src_op, host_ea * dst_op, shift_ir_t shift_ir, shift_irdisp_t shift_irdisp, shift_clr_t shift_clr, shift_clrdisp_t shift_clrdisp, shift_ir_t special, shift_irdisp_t special_disp)
+{
+ code_info *code = &opts->gen.code;
+ code_ptr end_off = NULL;
+ code_ptr nz_off = NULL;
+ code_ptr z_off = NULL;
+ if (inst->src.addr_mode == MODE_UNUSED) {
+ cycles(&opts->gen, BUS);
+ //Memory shift
+ shift_ir(code, 1, dst_op->base, SZ_W);
+ } else {
+ if (src_op->mode == MODE_IMMED) {
+ cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + 2 * src_op->disp);
+ if (src_op->disp != 1 && inst->op == M68K_ASL) {
+ set_flag(opts, 0, FLAG_V);
+ for (int i = 0; i < src_op->disp; i++) {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ shift_ir(code, 1, dst_op->base, inst->extra.size);
+ } else {
+ shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ check_alloc_code(code, 2*MAX_INST_LEN);
+ code_ptr after_flag_set = code->cur + 1;
+ jcc(code, CC_NO, code->cur + 2);
+ set_flag(opts, 1, FLAG_V);
+ *after_flag_set = code->cur - (after_flag_set+1);
+ }
+ } else {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ shift_ir(code, src_op->disp, dst_op->base, inst->extra.size);
+ } else {
+ shift_irdisp(code, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ set_flag_cond(opts, CC_O, FLAG_V);
+ }
+ } else {
+ cycles(&opts->gen, inst->extra.size == OPSIZE_LONG ? 8 : 6);
+ if (src_op->base != RCX) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, RCX, SZ_B);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, RCX, SZ_B);
+ }
+
+ }
+ and_ir(code, 63, RCX, SZ_D);
+ check_alloc_code(code, 7*MAX_INST_LEN);
+ nz_off = code->cur + 1;
+ jcc(code, CC_NZ, code->cur + 2);
+			//Flag behavior for a shift count of 0 differs between x86 and the 68K
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ cmp_ir(code, 0, dst_op->base, inst->extra.size);
+ } else {
+ cmp_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ set_flag_cond(opts, CC_Z, FLAG_Z);
+ set_flag_cond(opts, CC_S, FLAG_N);
+ set_flag(opts, 0, FLAG_C);
+ //For other instructions, this flag will be set below
+ if (inst->op == M68K_ASL) {
+ set_flag(opts, 0, FLAG_V);
+ }
+ z_off = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *nz_off = code->cur - (nz_off + 1);
+ //add 2 cycles for every bit shifted
+ mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D);
+ imul_rr(code, RCX, opts->gen.scratch2, SZ_D);
+ add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D);
+ if (inst->op == M68K_ASL) {
+ //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB
+ //Easiest way to deal with this is to shift one bit at a time
+ set_flag(opts, 0, FLAG_V);
+ check_alloc_code(code, 5*MAX_INST_LEN);
+ code_ptr loop_start = code->cur;
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ shift_ir(code, 1, dst_op->base, inst->extra.size);
+ } else {
+ shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ code_ptr after_flag_set = code->cur + 1;
+ jcc(code, CC_NO, code->cur + 2);
+ set_flag(opts, 1, FLAG_V);
+ *after_flag_set = code->cur - (after_flag_set+1);
+ loop(code, loop_start);
+ } else {
+ //x86 shifts modulo 32 for operand sizes less than 64-bits
+ //but M68K shifts modulo 64, so we need to check for large shifts here
+ cmp_ir(code, 32, RCX, SZ_B);
+ check_alloc_code(code, 14*MAX_INST_LEN);
+ code_ptr norm_shift_off = code->cur + 1;
+ jcc(code, CC_L, code->cur + 2);
+ if (special) {
+ code_ptr after_flag_set = NULL;
+ if (inst->extra.size == OPSIZE_LONG) {
+ code_ptr neq_32_off = code->cur + 1;
+ jcc(code, CC_NZ, code->cur + 2);
+
+ //set the carry bit to the lsb
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ special(code, 1, dst_op->base, SZ_D);
+ } else {
+ special_disp(code, 1, dst_op->base, dst_op->disp, SZ_D);
+ }
+ set_flag_cond(opts, CC_C, FLAG_C);
+ after_flag_set = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *neq_32_off = code->cur - (neq_32_off+1);
+ }
+ set_flag(opts, 0, FLAG_C);
+ if (after_flag_set) {
+ *after_flag_set = code->cur - (after_flag_set+1);
+ }
+ set_flag(opts, 1, FLAG_Z);
+ set_flag(opts, 0, FLAG_N);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ xor_rr(code, dst_op->base, dst_op->base, inst->extra.size);
+ } else {
+ mov_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ } else {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ shift_ir(code, 31, dst_op->base, inst->extra.size);
+ shift_ir(code, 1, dst_op->base, inst->extra.size);
+ } else {
+ shift_irdisp(code, 31, dst_op->base, dst_op->disp, inst->extra.size);
+ shift_irdisp(code, 1, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+
+ }
+ end_off = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *norm_shift_off = code->cur - (norm_shift_off+1);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ shift_clr(code, dst_op->base, inst->extra.size);
+ } else {
+ shift_clrdisp(code, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ }
+ }
+
+ }
+ if (!special && end_off) {
+ *end_off = code->cur - (end_off + 1);
+ }
+ update_flags(opts, C|Z|N);
+ if (special && end_off) {
+ *end_off = code->cur - (end_off + 1);
+ }
+ //set X flag to same as C flag
+ if (opts->flag_regs[FLAG_C] >= 0) {
+ flag_to_flag(opts, FLAG_C, FLAG_X);
+ } else {
+ set_flag_cond(opts, CC_C, FLAG_X);
+ }
+ if (z_off) {
+ *z_off = code->cur - (z_off + 1);
+ }
+ if (inst->op != M68K_ASL) {
+ set_flag(opts, 0, FLAG_V);
+ }
+ if (inst->src.addr_mode == MODE_UNUSED) {
+ m68k_save_result(inst, opts);
+ }
+}
+
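+//The op_* helpers dispatch a 68K ALU or bit operation to the equivalent x86 instruction
+//for each combination of immediate, host register and context-memory operands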
+void op_ir(code_info *code, m68kinst *inst, int32_t val, uint8_t dst, uint8_t size)
+{
+ switch (inst->op)
+ {
+ case M68K_ADD: add_ir(code, val, dst, size); break;
+ case M68K_ADDX: adc_ir(code, val, dst, size); break;
+ case M68K_AND: and_ir(code, val, dst, size); break;
+ case M68K_BTST: bt_ir(code, val, dst, size); break;
+ case M68K_BSET: bts_ir(code, val, dst, size); break;
+ case M68K_BCLR: btr_ir(code, val, dst, size); break;
+ case M68K_BCHG: btc_ir(code, val, dst, size); break;
+ case M68K_CMP: cmp_ir(code, val, dst, size); break;
+ case M68K_EOR: xor_ir(code, val, dst, size); break;
+ case M68K_OR: or_ir(code, val, dst, size); break;
+ case M68K_ROL: rol_ir(code, val, dst, size); break;
+ case M68K_ROR: ror_ir(code, val, dst, size); break;
+ case M68K_ROXL: rcl_ir(code, val, dst, size); break;
+ case M68K_ROXR: rcr_ir(code, val, dst, size); break;
+ case M68K_SUB: sub_ir(code, val, dst, size); break;
+ case M68K_SUBX: sbb_ir(code, val, dst, size); break;
+ }
+}
+
+void op_irdisp(code_info *code, m68kinst *inst, int32_t val, uint8_t dst, int32_t disp, uint8_t size)
+{
+ switch (inst->op)
+ {
+ case M68K_ADD: add_irdisp(code, val, dst, disp, size); break;
+ case M68K_ADDX: adc_irdisp(code, val, dst, disp, size); break;
+ case M68K_AND: and_irdisp(code, val, dst, disp, size); break;
+ case M68K_BTST: bt_irdisp(code, val, dst, disp, size); break;
+ case M68K_BSET: bts_irdisp(code, val, dst, disp, size); break;
+ case M68K_BCLR: btr_irdisp(code, val, dst, disp, size); break;
+ case M68K_BCHG: btc_irdisp(code, val, dst, disp, size); break;
+ case M68K_CMP: cmp_irdisp(code, val, dst, disp, size); break;
+ case M68K_EOR: xor_irdisp(code, val, dst, disp, size); break;
+ case M68K_OR: or_irdisp(code, val, dst, disp, size); break;
+ case M68K_ROL: rol_irdisp(code, val, dst, disp, size); break;
+ case M68K_ROR: ror_irdisp(code, val, dst, disp, size); break;
+ case M68K_ROXL: rcl_irdisp(code, val, dst, disp, size); break;
+ case M68K_ROXR: rcr_irdisp(code, val, dst, disp, size); break;
+ case M68K_SUB: sub_irdisp(code, val, dst, disp, size); break;
+ case M68K_SUBX: sbb_irdisp(code, val, dst, disp, size); break;
+ }
+}
+
+void op_rr(code_info *code, m68kinst *inst, uint8_t src, uint8_t dst, uint8_t size)
+{
+ switch (inst->op)
+ {
+ case M68K_ADD: add_rr(code, src, dst, size); break;
+ case M68K_ADDX: adc_rr(code, src, dst, size); break;
+ case M68K_AND: and_rr(code, src, dst, size); break;
+ case M68K_BTST: bt_rr(code, src, dst, size); break;
+ case M68K_BSET: bts_rr(code, src, dst, size); break;
+ case M68K_BCLR: btr_rr(code, src, dst, size); break;
+ case M68K_BCHG: btc_rr(code, src, dst, size); break;
+ case M68K_CMP: cmp_rr(code, src, dst, size); break;
+ case M68K_EOR: xor_rr(code, src, dst, size); break;
+ case M68K_OR: or_rr(code, src, dst, size); break;
+ case M68K_SUB: sub_rr(code, src, dst, size); break;
+ case M68K_SUBX: sbb_rr(code, src, dst, size); break;
+ }
+}
+
+void op_rrdisp(code_info *code, m68kinst *inst, uint8_t src, uint8_t dst, int32_t disp, uint8_t size)
+{
+ switch(inst->op)
+ {
+ case M68K_ADD: add_rrdisp(code, src, dst, disp, size); break;
+ case M68K_ADDX: adc_rrdisp(code, src, dst, disp, size); break;
+ case M68K_AND: and_rrdisp(code, src, dst, disp, size); break;
+ case M68K_BTST: bt_rrdisp(code, src, dst, disp, size); break;
+ case M68K_BSET: bts_rrdisp(code, src, dst, disp, size); break;
+ case M68K_BCLR: btr_rrdisp(code, src, dst, disp, size); break;
+ case M68K_BCHG: btc_rrdisp(code, src, dst, disp, size); break;
+ case M68K_CMP: cmp_rrdisp(code, src, dst, disp, size); break;
+ case M68K_EOR: xor_rrdisp(code, src, dst, disp, size); break;
+ case M68K_OR: or_rrdisp(code, src, dst, disp, size); break;
+ case M68K_SUB: sub_rrdisp(code, src, dst, disp, size); break;
+ case M68K_SUBX: sbb_rrdisp(code, src, dst, disp, size); break;
+ }
+}
+
+void op_rdispr(code_info *code, m68kinst *inst, uint8_t src, int32_t disp, uint8_t dst, uint8_t size)
+{
+ switch (inst->op)
+ {
+ case M68K_ADD: add_rdispr(code, src, disp, dst, size); break;
+ case M68K_ADDX: adc_rdispr(code, src, disp, dst, size); break;
+ case M68K_AND: and_rdispr(code, src, disp, dst, size); break;
+ case M68K_CMP: cmp_rdispr(code, src, disp, dst, size); break;
+ case M68K_EOR: xor_rdispr(code, src, disp, dst, size); break;
+ case M68K_OR: or_rdispr(code, src, disp, dst, size); break;
+ case M68K_SUB: sub_rdispr(code, src, disp, dst, size); break;
+ case M68K_SUBX: sbb_rdispr(code, src, disp, dst, size); break;
+ }
+}
+
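+//Common translation for ADD/SUB/AND/OR/EOR/CMP and the extended ADDX/SUBX forms: loads the
+//X flag into the host carry when needed, applies the operation to the already-fetched
+//operands and updates the requested flags (for ADDX/SUBX, Z is only ever cleared, never set)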
+void translate_m68k_arith(m68k_options *opts, m68kinst * inst, uint32_t flag_mask, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, BUS);
+ if (inst->op == M68K_ADDX || inst->op == M68K_SUBX) {
+ flag_to_carry(opts, FLAG_X);
+ }
+ uint8_t size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
+ if (src_op->mode == MODE_REG_DIRECT) {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_rr(code, inst, src_op->base, dst_op->base, size);
+ } else {
+ op_rrdisp(code, inst, src_op->base, dst_op->base, dst_op->disp, size);
+ }
+ } else if (src_op->mode == MODE_REG_DISPLACE8) {
+ op_rdispr(code, inst, src_op->base, src_op->disp, dst_op->base, size);
+ } else {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_ir(code, inst, src_op->disp, dst_op->base, size);
+ } else {
+ op_irdisp(code, inst, src_op->disp, dst_op->base, dst_op->disp, size);
+ }
+ }
+ if (inst->dst.addr_mode != MODE_AREG || inst->op == M68K_CMP) {
+ update_flags(opts, flag_mask);
+ if (inst->op == M68K_ADDX || inst->op == M68K_SUBX) {
+ check_alloc_code(code, 2*MAX_INST_LEN);
+ code_ptr after_flag_set = code->cur + 1;
+ jcc(code, CC_Z, code->cur + 2);
+ set_flag(opts, 0, FLAG_Z);
+ *after_flag_set = code->cur - (after_flag_set+1);
+ }
+ }
+ if (inst->op != M68K_CMP) {
+ m68k_save_result(inst, opts);
+ }
+}
+
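+//Translates CMP/CMPA/CMPM: fetches both operands, preserving the source around the
+//destination fetch for the postincrement (CMPM) form, then defers to translate_m68k_arith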
+void translate_m68k_cmp(m68k_options * opts, m68kinst * inst)
+{
+ code_info *code = &opts->gen.code;
+ uint8_t size = inst->extra.size;
+ host_ea src_op, dst_op;
+ translate_m68k_op(inst, &src_op, opts, 0);
+ if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
+ push_r(code, opts->gen.scratch1);
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ pop_r(code, opts->gen.scratch2);
+ src_op.base = opts->gen.scratch2;
+ } else {
+ translate_m68k_op(inst, &dst_op, opts, 1);
+ if (inst->dst.addr_mode == MODE_AREG && size == OPSIZE_WORD) {
+ size = OPSIZE_LONG;
+ }
+ }
+ translate_m68k_arith(opts, inst, N|Z|V|C, &src_op, &dst_op);
+}
+
+void op_r(code_info *code, m68kinst *inst, uint8_t dst, uint8_t size)
+{
+ switch(inst->op)
+ {
+ case M68K_NEG: neg_r(code, dst, size); break;
+ case M68K_NOT: not_r(code, dst, size); cmp_ir(code, 0, dst, size); break;
+ case M68K_ROL: rol_clr(code, dst, size); break;
+ case M68K_ROR: ror_clr(code, dst, size); break;
+ case M68K_ROXL: rcl_clr(code, dst, size); break;
+ case M68K_ROXR: rcr_clr(code, dst, size); break;
+ case M68K_SWAP: rol_ir(code, 16, dst, SZ_D); cmp_ir(code, 0, dst, SZ_D); break;
+ case M68K_TST: cmp_ir(code, 0, dst, size); break;
+ }
+}
+
+void op_rdisp(code_info *code, m68kinst *inst, uint8_t dst, int32_t disp, uint8_t size)
+{
+ switch(inst->op)
+ {
+ case M68K_NEG: neg_rdisp(code, dst, disp, size); break;
+ case M68K_NOT: not_rdisp(code, dst, disp, size); cmp_irdisp(code, 0, dst, disp, size); break;
+ case M68K_ROL: rol_clrdisp(code, dst, disp, size); break;
+ case M68K_ROR: ror_clrdisp(code, dst, disp, size); break;
+ case M68K_ROXL: rcl_clrdisp(code, dst, disp, size); break;
+ case M68K_ROXR: rcr_clrdisp(code, dst, disp, size); break;
+ case M68K_SWAP: rol_irdisp(code, 16, dst, disp, SZ_D); cmp_irdisp(code, 0, dst, disp, SZ_D); break;
+ case M68K_TST: cmp_irdisp(code, 0, dst, disp, size); break;
+ }
+}
+
+void translate_m68k_unary(m68k_options *opts, m68kinst *inst, uint32_t flag_mask, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, BUS);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_r(code, inst, dst_op->base, inst->extra.size);
+ } else {
+ op_rdisp(code, inst, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ update_flags(opts, flag_mask);
+ m68k_save_result(inst, opts);
+}
+
+void translate_m68k_invalid(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ if (inst->src.params.immed == 0x7100) {
+ retn(code);
+ return;
+ }
+ mov_ir(code, inst->address, opts->gen.scratch1, SZ_D);
+ call(code, (code_ptr)m68k_invalid);
+}
+
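+//Translates ABCD/SBCD: splits both operands into high and low nibbles using the 8-bit
+//register halves, adds or subtracts the low nibbles together with the X flag, applies the
+//BCD correction, then combines the result with the high nibbles and corrects again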
+void translate_m68k_abcd_sbcd(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ if (src_op->base != opts->gen.scratch2) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, opts->gen.scratch2, SZ_B);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_B);
+ }
+ }
+ if (dst_op->base != opts->gen.scratch1) {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, dst_op->base, opts->gen.scratch1, SZ_B);
+ } else {
+ mov_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch1, SZ_B);
+ }
+ }
+ uint8_t other_reg;
+ //WARNING: This may need adjustment if register assignments change
+ if (opts->gen.scratch2 > RBX) {
+ other_reg = RAX;
+ xchg_rr(code, opts->gen.scratch2, RAX, SZ_D);
+ } else {
+ other_reg = opts->gen.scratch2;
+ }
+ mov_rr(code, opts->gen.scratch1, opts->gen.scratch1 + (AH-RAX), SZ_B);
+ mov_rr(code, other_reg, other_reg + (AH-RAX), SZ_B);
+ and_ir(code, 0xF0, opts->gen.scratch1, SZ_B);
+ and_ir(code, 0xF0, other_reg, SZ_B);
+ and_ir(code, 0xF, opts->gen.scratch1 + (AH-RAX), SZ_B);
+ and_ir(code, 0xF, other_reg + (AH-RAX), SZ_B);
+ //do op on low nibble
+ flag_to_carry(opts, FLAG_X);
+ if (inst->op == M68K_ABCD) {
+ adc_rr(code, other_reg + (AH-RAX), opts->gen.scratch1 + (AH-RAX), SZ_B);
+ } else {
+ sbb_rr(code, other_reg + (AH-RAX), opts->gen.scratch1 + (AH-RAX), SZ_B);
+ }
+ cmp_ir(code, 0xA, opts->gen.scratch1 + (AH-RAX), SZ_B);
+ code_ptr no_adjust = code->cur+1;
+ //add correction factor if necessary
+ jcc(code, CC_B, no_adjust);
+ if (inst->op == M68K_ABCD) {
+ add_ir(code, 6, opts->gen.scratch1 + (AH-RAX), SZ_B);
+ } else {
+ sub_ir(code, 6, opts->gen.scratch1 + (AH-RAX), SZ_B);
+ }
+ *no_adjust = code->cur - (no_adjust+1);
+ //add low nibble result to one of the high nibble operands
+ add_rr(code, opts->gen.scratch1 + (AH-RAX), opts->gen.scratch1, SZ_B);
+ if (inst->op == M68K_ABCD) {
+ add_rr(code, other_reg, opts->gen.scratch1, SZ_B);
+ } else {
+ sub_rr(code, other_reg, opts->gen.scratch1, SZ_B);
+ }
+ if (opts->gen.scratch2 > RBX) {
+ mov_rr(code, opts->gen.scratch2, RAX, SZ_D);
+ }
+ set_flag(opts, 0, FLAG_C);
+ set_flag(opts, 0, FLAG_V);
+ code_ptr def_adjust = code->cur+1;
+ jcc(code, CC_C, def_adjust);
+ cmp_ir(code, 0xA0, opts->gen.scratch1, SZ_B);
+ no_adjust = code->cur+1;
+ jcc(code, CC_B, no_adjust);
+ *def_adjust = code->cur - (def_adjust + 1);
+ set_flag(opts, 1, FLAG_C);
+ if (inst->op == M68K_ABCD) {
+ add_ir(code, 0x60, opts->gen.scratch1, SZ_B);
+ } else {
+ sub_ir(code, 0x60, opts->gen.scratch1, SZ_B);
+ }
+ //V flag is set based on the result of the addition of the
+ //result and the correction factor
+ set_flag_cond(opts, CC_O, FLAG_V);
+ *no_adjust = code->cur - (no_adjust+1);
+ flag_to_flag(opts, FLAG_C, FLAG_X);
+
+ cmp_ir(code, 0, opts->gen.scratch1, SZ_B);
+ set_flag_cond(opts, CC_S, FLAG_N);
+ jcc(code, CC_Z, code->cur + 4);
+ set_flag(opts, 0, FLAG_Z);
+ if (dst_op->base != opts->gen.scratch1) {
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_B);
+ } else {
+ mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_B);
+ }
+ }
+ m68k_save_result(inst, opts);
+}
+
+void translate_m68k_sl(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ translate_shift(opts, inst, src_op, dst_op, shl_ir, shl_irdisp, shl_clr, shl_clrdisp, shr_ir, shr_irdisp);
+}
+
+void translate_m68k_asr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ translate_shift(opts, inst, src_op, dst_op, sar_ir, sar_irdisp, sar_clr, sar_clrdisp, NULL, NULL);
+}
+
+void translate_m68k_lsr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ translate_shift(opts, inst, src_op, dst_op, shr_ir, shr_irdisp, shr_clr, shr_clrdisp, shl_ir, shl_irdisp);
+}
+
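+//Translates BTST/BSET/BCLR/BCHG: byte-sized immediate bit numbers are masked to 0-7, and
+//register-sourced bit numbers are reduced to the 68K semantics (modulo 32 for data register
+//destinations, modulo 8 for memory) before dispatching to the x86 bt/bts/btr/btc forms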
+void translate_m68k_bit(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, inst->extra.size == OPSIZE_BYTE ? 4 : (
+ inst->op == M68K_BTST ? 6 : (inst->op == M68K_BCLR ? 10 : 8))
+ );
+ if (src_op->mode == MODE_IMMED) {
+ if (inst->extra.size == OPSIZE_BYTE) {
+ src_op->disp &= 0x7;
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_ir(code, inst, src_op->disp, dst_op->base, inst->extra.size);
+ } else {
+ op_irdisp(code, inst, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ } else {
+ if (src_op->mode == MODE_REG_DISPLACE8 || (inst->dst.addr_mode != MODE_REG && src_op->base != opts->gen.scratch1 && src_op->base != opts->gen.scratch2)) {
+ if (dst_op->base == opts->gen.scratch1) {
+ push_r(code, opts->gen.scratch2);
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, opts->gen.scratch2, SZ_B);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_B);
+ }
+ src_op->base = opts->gen.scratch2;
+ } else {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, opts->gen.scratch1, SZ_B);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B);
+ }
+ src_op->base = opts->gen.scratch1;
+ }
+ }
+ uint8_t size = inst->extra.size;
+ if (dst_op->mode == MODE_REG_DISPLACE8) {
+ if (src_op->base != opts->gen.scratch1 && src_op->base != opts->gen.scratch2) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, opts->gen.scratch1, SZ_D);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_D);
+ src_op->mode = MODE_REG_DIRECT;
+ }
+ src_op->base = opts->gen.scratch1;
+ }
+ //b### with register destination is modulo 32
+ //x86 with a memory destination isn't modulo anything
+ //so use an and here to force the value to be modulo 32
+ and_ir(code, 31, opts->gen.scratch1, SZ_D);
+ } else if(inst->dst.addr_mode != MODE_REG) {
+ //b### with memory destination is modulo 8
+ //x86-64 doesn't support 8-bit bit operations
+ //so we fake it by forcing the bit number to be modulo 8
+ and_ir(code, 7, src_op->base, SZ_D);
+ size = SZ_D;
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_rr(code, inst, src_op->base, dst_op->base, size);
+ } else {
+ op_rrdisp(code, inst, src_op->base, dst_op->base, dst_op->disp, size);
+ }
+ if (src_op->base == opts->gen.scratch2) {
+ pop_r(code, opts->gen.scratch2);
+ }
+ }
+ //x86 sets the carry flag to the value of the bit tested
+ //68K sets the zero flag to the complement of the bit tested
+ set_flag_cond(opts, CC_NC, FLAG_Z);
+ if (inst->op != M68K_BTST) {
+ m68k_save_result(inst, opts);
+ }
+}
+
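+//Translates CHK: traps through the CHK vector when the register is negative (N set) or
+//greater than the bound (N cleared); isize is the instruction length used to compute the
+//PC value passed to the trap handler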
+void translate_m68k_chk(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 6);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ cmp_ir(code, 0, dst_op->base, inst->extra.size);
+ } else {
+ cmp_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ uint32_t isize;
+ switch(inst->src.addr_mode)
+ {
+ case MODE_AREG_DISPLACE:
+ case MODE_AREG_INDEX_DISP8:
+ case MODE_ABSOLUTE_SHORT:
+ case MODE_PC_INDEX_DISP8:
+ case MODE_PC_DISPLACE:
+ case MODE_IMMEDIATE:
+ isize = 4;
+ break;
+ case MODE_ABSOLUTE:
+ isize = 6;
+ break;
+ default:
+ isize = 2;
+ }
+ //make sure we won't start a new chunk in the middle of these branches
+ check_alloc_code(code, MAX_INST_LEN * 11);
+ code_ptr passed = code->cur + 1;
+ jcc(code, CC_GE, code->cur + 2);
+ set_flag(opts, 1, FLAG_N);
+ mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D);
+ mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
+ jmp(code, opts->trap);
+ *passed = code->cur - (passed+1);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ cmp_rr(code, src_op->base, dst_op->base, inst->extra.size);
+ } else if(src_op->mode == MODE_REG_DISPLACE8) {
+ cmp_rdispr(code, src_op->base, src_op->disp, dst_op->base, inst->extra.size);
+ } else {
+ cmp_ir(code, src_op->disp, dst_op->base, inst->extra.size);
+ }
+ } else if(dst_op->mode == MODE_REG_DISPLACE8) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ cmp_rrdisp(code, src_op->base, dst_op->base, dst_op->disp, inst->extra.size);
+ } else {
+ cmp_irdisp(code, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ }
+ passed = code->cur + 1;
+ jcc(code, CC_LE, code->cur + 2);
+ set_flag(opts, 0, FLAG_N);
+ mov_ir(code, VECTOR_CHK, opts->gen.scratch2, SZ_D);
+ mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
+ jmp(code, opts->trap);
+ *passed = code->cur - (passed+1);
+ cycles(&opts->gen, 4);
+}
+
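+//Translates DIVU/DIVS: traps on division by zero, divides the 32-bit destination by the
+//sign/zero-extended 16-bit source and stores remainder:quotient, setting V and leaving the
+//destination untouched when the quotient overflows 16 bits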
+void translate_m68k_div(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ check_alloc_code(code, MAX_NATIVE_SIZE);
+ //TODO: cycle exact division
+ cycles(&opts->gen, inst->op == M68K_DIVS ? 158 : 140);
+ set_flag(opts, 0, FLAG_C);
+ push_r(code, RDX);
+ push_r(code, RAX);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, dst_op->base, RAX, SZ_D);
+ } else {
+ mov_rdispr(code, dst_op->base, dst_op->disp, RAX, SZ_D);
+ }
+ if (src_op->mode == MODE_IMMED) {
+ mov_ir(code, (src_op->disp & 0x8000) && inst->op == M68K_DIVS ? src_op->disp | 0xFFFF0000 : src_op->disp, opts->gen.scratch2, SZ_D);
+ } else if (src_op->mode == MODE_REG_DIRECT) {
+ if (inst->op == M68K_DIVS) {
+ movsx_rr(code, src_op->base, opts->gen.scratch2, SZ_W, SZ_D);
+ } else {
+ movzx_rr(code, src_op->base, opts->gen.scratch2, SZ_W, SZ_D);
+ }
+ } else if (src_op->mode == MODE_REG_DISPLACE8) {
+ if (inst->op == M68K_DIVS) {
+ movsx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
+ } else {
+ movzx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
+ }
+ }
+ uint32_t isize = 2;
+ switch(inst->src.addr_mode)
+ {
+ case MODE_AREG_DISPLACE:
+ case MODE_AREG_INDEX_DISP8:
+ case MODE_ABSOLUTE_SHORT:
+ case MODE_PC_INDEX_DISP8:
+ case MODE_IMMEDIATE:
+ isize = 4;
+ break;
+ case MODE_ABSOLUTE:
+ isize = 6;
+ break;
+ }
+ cmp_ir(code, 0, opts->gen.scratch2, SZ_D);
+ check_alloc_code(code, 6*MAX_INST_LEN);
+ code_ptr not_zero = code->cur + 1;
+ jcc(code, CC_NZ, code->cur + 2);
+ pop_r(code, RAX);
+ pop_r(code, RDX);
+ mov_ir(code, VECTOR_INT_DIV_ZERO, opts->gen.scratch2, SZ_D);
+ mov_ir(code, inst->address+isize, opts->gen.scratch1, SZ_D);
+ jmp(code, opts->trap);
+ *not_zero = code->cur - (not_zero+1);
+ if (inst->op == M68K_DIVS) {
+ cdq(code);
+ } else {
+ xor_rr(code, RDX, RDX, SZ_D);
+ }
+ if (inst->op == M68K_DIVS) {
+ idiv_r(code, opts->gen.scratch2, SZ_D);
+ } else {
+ div_r(code, opts->gen.scratch2, SZ_D);
+ }
+ code_ptr skip_sec_check, norm_off;
+ if (inst->op == M68K_DIVS) {
+ cmp_ir(code, 0x8000, RAX, SZ_D);
+ skip_sec_check = code->cur + 1;
+ jcc(code, CC_GE, code->cur + 2);
+ cmp_ir(code, -0x8000, RAX, SZ_D);
+ norm_off = code->cur + 1;
+ jcc(code, CC_L, code->cur + 2);
+ } else {
+ cmp_ir(code, 0x10000, RAX, SZ_D);
+ norm_off = code->cur + 1;
+ jcc(code, CC_NC, code->cur + 2);
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, RDX, dst_op->base, SZ_W);
+ shl_ir(code, 16, dst_op->base, SZ_D);
+ mov_rr(code, RAX, dst_op->base, SZ_W);
+ } else {
+ mov_rrdisp(code, RDX, dst_op->base, dst_op->disp, SZ_W);
+ shl_irdisp(code, 16, dst_op->base, dst_op->disp, SZ_D);
+ mov_rrdisp(code, RAX, dst_op->base, dst_op->disp, SZ_W);
+ }
+ cmp_ir(code, 0, RAX, SZ_W);
+ pop_r(code, RAX);
+ pop_r(code, RDX);
+ update_flags(opts, V0|Z|N);
+ code_ptr end_off = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *norm_off = code->cur - (norm_off + 1);
+ if (inst->op == M68K_DIVS) {
+ *skip_sec_check = code->cur - (skip_sec_check+1);
+ }
+ pop_r(code, RAX);
+ pop_r(code, RDX);
+ set_flag(opts, 1, FLAG_V);
+ *end_off = code->cur - (end_off + 1);
+}
+
+void translate_m68k_exg(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 6);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, dst_op->base, opts->gen.scratch2, SZ_D);
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, dst_op->base, SZ_D);
+ mov_rr(code, opts->gen.scratch2, src_op->base, SZ_D);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, dst_op->base, SZ_D);
+ mov_rrdisp(code, opts->gen.scratch2, src_op->base, src_op->disp, SZ_D);
+ }
+ } else {
+ mov_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch2, SZ_D);
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rrdisp(code, src_op->base, dst_op->base, dst_op->disp, SZ_D);
+ mov_rr(code, opts->gen.scratch2, src_op->base, SZ_D);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_D);
+ mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_D);
+ mov_rrdisp(code, opts->gen.scratch2, src_op->base, src_op->disp, SZ_D);
+ }
+ }
+}
+
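+//Translates MULU/MULS: sign or zero extends both 16-bit operands and uses a single 32-bit
+//imul, then sets N/Z from the full 32-bit product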
+void translate_m68k_mul(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 70); //TODO: Calculate the actual value based on the value of the <ea> parameter
+ if (src_op->mode == MODE_IMMED) {
+ mov_ir(code, inst->op == M68K_MULU ? (src_op->disp & 0xFFFF) : ((src_op->disp & 0x8000) ? src_op->disp | 0xFFFF0000 : src_op->disp), opts->gen.scratch1, SZ_D);
+ } else if (src_op->mode == MODE_REG_DIRECT) {
+ if (inst->op == M68K_MULS) {
+ movsx_rr(code, src_op->base, opts->gen.scratch1, SZ_W, SZ_D);
+ } else {
+ movzx_rr(code, src_op->base, opts->gen.scratch1, SZ_W, SZ_D);
+ }
+ } else {
+ if (inst->op == M68K_MULS) {
+ movsx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_W, SZ_D);
+ } else {
+ movzx_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_W, SZ_D);
+ }
+ }
+ uint8_t dst_reg;
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ dst_reg = dst_op->base;
+ if (inst->op == M68K_MULS) {
+ movsx_rr(code, dst_reg, dst_reg, SZ_W, SZ_D);
+ } else {
+ movzx_rr(code, dst_reg, dst_reg, SZ_W, SZ_D);
+ }
+ } else {
+ dst_reg = opts->gen.scratch2;
+ if (inst->op == M68K_MULS) {
+ movsx_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
+ } else {
+ movzx_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch2, SZ_W, SZ_D);
+ }
+ }
+ imul_rr(code, opts->gen.scratch1, dst_reg, SZ_D);
+ if (dst_op->mode == MODE_REG_DISPLACE8) {
+ mov_rrdisp(code, dst_reg, dst_op->base, dst_op->disp, SZ_D);
+ }
+ cmp_ir(code, 0, dst_reg, SZ_D);
+ update_flags(opts, N|Z|V0|C0);
+}
+
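+//Translates NEGX: computes 0 - destination - X via sbb and, like ADDX/SUBX, only ever
+//clears the Z flag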
+void translate_m68k_negx(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, BUS);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ if (dst_op->base == opts->gen.scratch1) {
+ push_r(code, opts->gen.scratch2);
+ xor_rr(code, opts->gen.scratch2, opts->gen.scratch2, inst->extra.size);
+ flag_to_carry(opts, FLAG_X);
+ sbb_rr(code, dst_op->base, opts->gen.scratch2, inst->extra.size);
+ mov_rr(code, opts->gen.scratch2, dst_op->base, inst->extra.size);
+ pop_r(code, opts->gen.scratch2);
+ } else {
+ xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, inst->extra.size);
+ flag_to_carry(opts, FLAG_X);
+ sbb_rr(code, dst_op->base, opts->gen.scratch1, inst->extra.size);
+ mov_rr(code, opts->gen.scratch1, dst_op->base, inst->extra.size);
+ }
+ } else {
+ xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, inst->extra.size);
+ flag_to_carry(opts, FLAG_X);
+ sbb_rdispr(code, dst_op->base, dst_op->disp, opts->gen.scratch1, inst->extra.size);
+ mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ set_flag_cond(opts, CC_C, FLAG_C);
+ code_ptr after_flag_set = code->cur + 1;
+ jcc(code, CC_Z, code->cur + 2);
+ set_flag(opts, 0, FLAG_Z);
+ *after_flag_set = code->cur - (after_flag_set+1);
+ set_flag_cond(opts, CC_S, FLAG_N);
+ set_flag_cond(opts, CC_O, FLAG_V);
+ if (opts->flag_regs[FLAG_C] >= 0) {
+ flag_to_flag(opts, FLAG_C, FLAG_X);
+ } else {
+ set_flag_cond(opts, CC_C, FLAG_X);
+ }
+ m68k_save_result(inst, opts);
+}
+
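+//Translates ROL/ROR/ROXL/ROXR: immediate and memory forms map directly onto the x86
+//rotates, register counts are taken modulo 64 with special handling for counts of 32 or
+//more, and the ROX variants thread the 68K X flag through the host carry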
+void translate_m68k_rot(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ int32_t init_flags = C|V0;
+ if (inst->src.addr_mode == MODE_UNUSED) {
+ cycles(&opts->gen, BUS);
+ //Memory rotate
+ if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
+ flag_to_carry(opts, FLAG_X);
+ init_flags |= X;
+ }
+ op_ir(code, inst, 1, dst_op->base, inst->extra.size);
+ update_flags(opts, init_flags);
+ cmp_ir(code, 0, dst_op->base, inst->extra.size);
+ update_flags(opts, Z|N);
+ m68k_save_result(inst, opts);
+ } else {
+ if (src_op->mode == MODE_IMMED) {
+ cycles(&opts->gen, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op->disp*2);
+ if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
+ flag_to_carry(opts, FLAG_X);
+ init_flags |= X;
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_ir(code, inst, src_op->disp, dst_op->base, inst->extra.size);
+ } else {
+ op_irdisp(code, inst, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ update_flags(opts, init_flags);
+ } else {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ if (src_op->base != opts->gen.scratch1) {
+ mov_rr(code, src_op->base, opts->gen.scratch1, SZ_B);
+ }
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_B);
+ }
+ and_ir(code, 63, opts->gen.scratch1, SZ_D);
+ code_ptr zero_off = code->cur + 1;
+ jcc(code, CC_Z, code->cur + 2);
+ //add 2 cycles for every bit shifted
+ mov_ir(code, 2 * opts->gen.clock_divider, opts->gen.scratch2, SZ_D);
+ imul_rr(code, RCX, opts->gen.scratch2, SZ_D);
+ add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D);
+ cmp_ir(code, 32, opts->gen.scratch1, SZ_B);
+ code_ptr norm_off = code->cur + 1;
+ jcc(code, CC_L, code->cur + 2);
+ if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
+ flag_to_carry(opts, FLAG_X);
+ init_flags |= X;
+ } else {
+ sub_ir(code, 32, opts->gen.scratch1, SZ_B);
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_ir(code, inst, 31, dst_op->base, inst->extra.size);
+ op_ir(code, inst, 1, dst_op->base, inst->extra.size);
+ } else {
+ op_irdisp(code, inst, 31, dst_op->base, dst_op->disp, inst->extra.size);
+ op_irdisp(code, inst, 1, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+
+ if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
+ set_flag_cond(opts, CC_C, FLAG_X);
+ sub_ir(code, 32, opts->gen.scratch1, SZ_B);
+ *norm_off = code->cur - (norm_off+1);
+ flag_to_carry(opts, FLAG_X);
+ } else {
+ *norm_off = code->cur - (norm_off+1);
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ op_r(code, inst, dst_op->base, inst->extra.size);
+ } else {
+ op_rdisp(code, inst, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ update_flags(opts, init_flags);
+ code_ptr end_off = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *zero_off = code->cur - (zero_off+1);
+ if (inst->op == M68K_ROXR || inst->op == M68K_ROXL) {
+				//Carry flag is set to the X flag when the count is 0; this differs from ROR/ROL
+ flag_to_flag(opts, FLAG_X, FLAG_C);
+ } else {
+ set_flag(opts, 0, FLAG_C);
+ }
+ *end_off = code->cur - (end_off+1);
+ }
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ cmp_ir(code, 0, dst_op->base, inst->extra.size);
+ } else {
+ cmp_irdisp(code, 0, dst_op->base, dst_op->disp, inst->extra.size);
+ }
+ update_flags(opts, Z|N);
+ }
+}
+
+void translate_m68k_illegal(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ call(code, opts->gen.save_context);
+ call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg);
+}
+
+#define BIT_SUPERVISOR 5
+
+void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 20);
+	//TODO: If ANDI/ORI to SR, trap if not in supervisor mode
+ uint32_t flag_mask = 0;
+ uint32_t base_flag = inst->op == M68K_ANDI_SR || inst->op == M68K_ANDI_CCR ? X0 : X1;
+ for (int i = 0; i < 5; i++)
+ {
+		if ((base_flag == X0) ^ ((inst->src.params.immed & 1 << i) > 0))
+ {
+ flag_mask |= base_flag << ((4 - i) * 3);
+ }
+ }
+ update_flags(opts, flag_mask);
+ if (inst->op == M68K_ANDI_SR || inst->op == M68K_ORI_SR) {
+ if (inst->op == M68K_ANDI_SR) {
+ and_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ } else {
+ or_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ }
+ if (inst->op == M68K_ANDI_SR && !(inst->src.params.immed & (1 << (BIT_SUPERVISOR + 8)))) {
+ //leave supervisor mode
+ swap_ssp_usp(opts);
+ }
+ if ((inst->op == M68K_ANDI_SR && (inst->src.params.immed & 0x700) != 0x700)
+ || (inst->op == M68K_ORI_SR && inst->src.params.immed & 0x700)) {
+ call(code, opts->do_sync);
+ }
+ }
+}
+
+void translate_m68k_eori_ccr_sr(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, 20);
+	//TODO: If EORI to SR, trap if not in supervisor mode
+ if (inst->src.params.immed & 0x1) {
+ xor_flag(opts, 1, FLAG_C);
+ }
+ if (inst->src.params.immed & 0x2) {
+ xor_flag(opts, 1, FLAG_V);
+ }
+ if (inst->src.params.immed & 0x4) {
+ xor_flag(opts, 1, FLAG_Z);
+ }
+ if (inst->src.params.immed & 0x8) {
+ xor_flag(opts, 1, FLAG_N);
+ }
+ if (inst->src.params.immed & 0x10) {
+ xor_flag(opts, 1, FLAG_X);
+ }
+	if (inst->op == M68K_EORI_SR) {
+ xor_irdisp(code, inst->src.params.immed >> 8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ if (inst->src.params.immed & 0x700) {
+ call(code, opts->do_sync);
+ }
+ }
+}
+
+void set_all_flags(m68k_options *opts, uint8_t flags)
+{
+ uint32_t flag_mask = flags & 0x10 ? X1 : X0;
+ flag_mask |= flags & 0x8 ? N1 : N0;
+ flag_mask |= flags & 0x4 ? Z1 : Z0;
+ flag_mask |= flags & 0x2 ? V1 : V0;
+ flag_mask |= flags & 0x1 ? C1 : C0;
+ update_flags(opts, flag_mask);
+}
+
+void translate_m68k_move_ccr_sr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ //TODO: Privilege check for MOVE to SR
+ if (src_op->mode == MODE_IMMED) {
+ set_all_flags(opts, src_op->disp);
+ if (inst->op == M68K_MOVE_SR) {
+ mov_irdisp(code, (src_op->disp >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
+ //leave supervisor mode
+ swap_ssp_usp(opts);
+ }
+ call(code, opts->do_sync);
+ }
+ cycles(&opts->gen, 12);
+ } else {
+ if (src_op->base != opts->gen.scratch1) {
+ if (src_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, src_op->base, opts->gen.scratch1, SZ_W);
+ } else {
+ mov_rdispr(code, src_op->base, src_op->disp, opts->gen.scratch1, SZ_W);
+ }
+ }
+ call(code, inst->op == M68K_MOVE_SR ? opts->set_sr : opts->set_ccr);
+ cycles(&opts->gen, 12);
+ }
+}
+
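+//Translates STOP: loads the immediate into SR, then repeatedly syncs the other components
+//and advances the cycle count until the next interrupt time is reached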
+void translate_m68k_stop(m68k_options *opts, m68kinst *inst)
+{
+ //TODO: Trap if not in system mode
+	//The manual says 4 cycles, but it has to be at least 8 since it's a 2-word instruction,
+	//possibly even 12 since that's how long MOVE to SR takes.
+	//On further thought, prefetch plus the fact that this stops the CPU may make
+	//Motorola's accounting make sense here
+ code_info *code = &opts->gen.code;
+ cycles(&opts->gen, BUS*2);
+ set_all_flags(opts, inst->src.params.immed);
+ mov_irdisp(code, (inst->src.params.immed >> 8), opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
+ //leave supervisor mode
+ swap_ssp_usp(opts);
+ }
+ code_ptr loop_top = code->cur;
+ call(code, opts->do_sync);
+ cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
+ code_ptr normal_cycle_up = code->cur + 1;
+ jcc(code, CC_A, code->cur + 2);
+ cycles(&opts->gen, BUS);
+ code_ptr after_cycle_up = code->cur + 1;
+ jmp(code, code->cur + 2);
+ *normal_cycle_up = code->cur - (normal_cycle_up + 1);
+ mov_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D);
+ *after_cycle_up = code->cur - (after_cycle_up+1);
+ cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
+ jcc(code, CC_C, loop_top);
+}
+
+void translate_m68k_move_from_sr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op)
+{
+ code_info *code = &opts->gen.code;
+ //TODO: Trap if not in system mode
+ call(code, opts->get_sr);
+ if (dst_op->mode == MODE_REG_DIRECT) {
+ mov_rr(code, opts->gen.scratch1, dst_op->base, SZ_W);
+ } else {
+ mov_rrdisp(code, opts->gen.scratch1, dst_op->base, dst_op->disp, SZ_W);
+ }
+ m68k_save_result(inst, opts);
+}
+
+void translate_m68k_reset(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ call(code, opts->gen.save_context);
+ call_args(code, (code_ptr)print_regs_exit, 1, opts->gen.context_reg);
+}
+
+void translate_m68k_rte(m68k_options *opts, m68kinst *inst)
+{
+ code_info *code = &opts->gen.code;
+ //TODO: Trap if not in system mode
+ //Read saved SR
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ call(code, opts->read_16);
+ addi_areg(opts, 2, 7);
+ call(code, opts->set_sr);
+ //Read saved PC
+ areg_to_native(opts, 7, opts->gen.scratch1);
+ call(code, opts->read_32);
+ addi_areg(opts, 4, 7);
+ //Check if we've switched to user mode and swap stack pointers if needed
+ bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ code_ptr end_off = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ swap_ssp_usp(opts);
+ *end_off = code->cur - (end_off+1);
+	//Get native address, sync components, recalculate interrupt points and jump to returned address
+ call(code, opts->native_addr_and_sync);
+ jmp_r(code, opts->gen.scratch1);
+}
+
+void translate_out_of_bounds(code_info *code)
+{
+ xor_rr(code, RDI, RDI, SZ_D);
+ call_args(code, (code_ptr)exit, 1, RDI);
+}
+
+void nop_fill_or_jmp_next(code_info *code, code_ptr old_end, code_ptr next_inst)
+{
+ if (next_inst == old_end && next_inst - code->cur < 2) {
+ while (code->cur < old_end) {
+ *(code->cur++) = 0x90; //NOP
+ }
+ } else {
+ jmp(code, next_inst);
+ }
+}
+
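+//Called when a write hits memory that contains translated code; overwrites the start of
+//the affected instruction's native code with a jump to a stub that retranslates it and
+//resumes execution at the fresh translation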
+m68k_context * m68k_handle_code_write(uint32_t address, m68k_context * context)
+{
+ uint32_t inst_start = get_instruction_start(context->native_code_map, address | 0xFF0000);
+ if (inst_start) {
+ m68k_options * options = context->options;
+ code_info *code = &options->gen.code;
+ code_ptr dst = get_native_address(context->native_code_map, inst_start);
+ code_info orig;
+ orig.cur = dst;
+ orig.last = dst + 128;
+ mov_ir(&orig, inst_start, options->gen.scratch2, SZ_D);
+
+ if (!options->retrans_stub) {
+ options->retrans_stub = code->cur;
+ call(code, options->gen.save_context);
+ push_r(code, options->gen.context_reg);
+ call_args(code,(code_ptr)m68k_retranslate_inst, 2, options->gen.scratch2, options->gen.context_reg);
+ pop_r(code, options->gen.context_reg);
+ mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
+ call(code, options->gen.load_context);
+ jmp_r(code, options->gen.scratch1);
+ }
+ jmp(&orig, options->retrans_stub);
+ }
+ return context;
+}
+
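+//Patches the native code for the given 68K address to call a shared stub that saves the
+//context, invokes bp_handler, redoes the cycle/interrupt check and then resumes execution
+//just past the overwritten prologue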
+void insert_breakpoint(m68k_context * context, uint32_t address, code_ptr bp_handler)
+{
+ static code_ptr bp_stub = NULL;
+ m68k_options * opts = context->options;
+ code_info native;
+ native.cur = get_native_address_trans(context, address);
+ native.last = native.cur + 128;
+ code_ptr start_native = native.cur;
+ mov_ir(&native, address, opts->gen.scratch1, SZ_D);
+ if (!bp_stub) {
+ code_info *code = &opts->gen.code;
+ check_code_prologue(code);
+ bp_stub = code->cur;
+ call(&native, bp_stub);
+
+ //Calculate length of prologue
+ check_cycles_int(&opts->gen, address);
+ int check_int_size = code->cur-bp_stub;
+ code->cur = bp_stub;
+
+ //Save context and call breakpoint handler
+ call(code, opts->gen.save_context);
+ push_r(code, opts->gen.scratch1);
+ call_args_abi(code, bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1);
+ mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
+ //Restore context
+ call(code, opts->gen.load_context);
+ pop_r(code, opts->gen.scratch1);
+ //do prologue stuff
+ cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
+ code_ptr jmp_off = code->cur + 1;
+ jcc(code, CC_NC, code->cur + 7);
+ call(code, opts->gen.handle_cycle_limit_int);
+ *jmp_off = code->cur - (jmp_off+1);
+ //jump back to body of translated instruction
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, check_int_size - (native.cur-start_native), opts->gen.scratch1, SZ_PTR);
+ jmp_r(code, opts->gen.scratch1);
+ } else {
+ call(&native, bp_stub);
+ }
+}
+
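+//Sets up the x86 backend: assigns host registers to 68K registers and flags, then generates
+//the shared helper routines (context save/load, address translation, sync, and the memory
+//access functions) that translated code calls into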
+void init_m68k_opts(m68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks, uint32_t clock_divider)
+{
+ memset(opts, 0, sizeof(*opts));
+ opts->gen.memmap = memmap;
+ opts->gen.memmap_chunks = num_chunks;
+ opts->gen.address_size = SZ_D;
+ opts->gen.address_mask = 0xFFFFFF;
+ opts->gen.byte_swap = 1;
+ opts->gen.max_address = 0x1000000;
+ opts->gen.bus_cycles = BUS;
+ opts->gen.clock_divider = clock_divider;
+ opts->gen.mem_ptr_off = offsetof(m68k_context, mem_pointers);
+ opts->gen.ram_flags_off = offsetof(m68k_context, ram_code_flags);
+ opts->gen.ram_flags_shift = 11;
+ for (int i = 0; i < 8; i++)
+ {
+ opts->dregs[i] = opts->aregs[i] = -1;
+ }
+#ifdef X86_64
+ opts->dregs[0] = R10;
+ opts->dregs[1] = R11;
+ opts->dregs[2] = R12;
+ opts->dregs[3] = R8;
+ opts->aregs[0] = R13;
+ opts->aregs[1] = R14;
+ opts->aregs[2] = R9;
+ opts->aregs[7] = R15;
+
+ opts->flag_regs[0] = -1;
+ opts->flag_regs[1] = RBX;
+ opts->flag_regs[2] = RDX;
+ opts->flag_regs[3] = BH;
+ opts->flag_regs[4] = DH;
+
+ opts->gen.scratch2 = RDI;
+#else
+ opts->dregs[0] = RDX;
+ opts->aregs[7] = RDI;
+
+ for (int i = 0; i < 5; i++)
+ {
+ opts->flag_regs[i] = -1;
+ }
+ opts->gen.scratch2 = RBX;
+#endif
+ opts->gen.context_reg = RSI;
+ opts->gen.cycles = RAX;
+ opts->gen.limit = RBP;
+ opts->gen.scratch1 = RCX;
+
+
+ opts->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
+ memset(opts->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
+ opts->gen.deferred = NULL;
+ opts->gen.ram_inst_sizes = malloc(sizeof(uint8_t *) * 64);
+ memset(opts->gen.ram_inst_sizes, 0, sizeof(uint8_t *) * 64);
+
+ code_info *code = &opts->gen.code;
+ init_code_info(code);
+
+ opts->gen.save_context = code->cur;
+ for (int i = 0; i < 5; i++)
+ if (opts->flag_regs[i] >= 0) {
+ mov_rrdisp(code, opts->flag_regs[i], opts->gen.context_reg, offsetof(m68k_context, flags) + i, SZ_B);
+ }
+ for (int i = 0; i < 8; i++)
+ {
+ if (opts->dregs[i] >= 0) {
+ mov_rrdisp(code, opts->dregs[i], opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, SZ_D);
+ }
+ if (opts->aregs[i] >= 0) {
+ mov_rrdisp(code, opts->aregs[i], opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, SZ_D);
+ }
+ }
+ mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(m68k_context, current_cycle), SZ_D);
+ retn(code);
+
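+	//load_context: reload the cached state from the context, including the current cycle count and the target cycle limit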
+ opts->gen.load_context = code->cur;
+ for (int i = 0; i < 5; i++)
+ if (opts->flag_regs[i] >= 0) {
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + i, opts->flag_regs[i], SZ_B);
+ }
+ for (int i = 0; i < 8; i++)
+ {
+ if (opts->dregs[i] >= 0) {
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, opts->dregs[i], SZ_D);
+ }
+ if (opts->aregs[i] >= 0) {
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D);
+ }
+ }
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, current_cycle), opts->gen.cycles, SZ_D);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, target_cycle), opts->gen.limit, SZ_D);
+ retn(code);
+
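+	//start_context: C-callable entry point that preserves callee-save registers, loads the context,
+	//calls the supplied block of translated code and saves the context back before returning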
+ opts->start_context = (start_fun)code->cur;
+ save_callee_save_regs(code);
+#ifdef X86_64
+ if (opts->gen.scratch2 != RDI) {
+ mov_rr(code, RDI, opts->gen.scratch2, SZ_PTR);
+ }
+#else
+ mov_rdispr(code, RSP, 20, opts->gen.scratch2, SZ_D);
+ mov_rdispr(code, RSP, 24, opts->gen.context_reg, SZ_D);
+#endif
+ call(code, opts->gen.load_context);
+ call_r(code, opts->gen.scratch2);
+ call(code, opts->gen.save_context);
+ restore_callee_save_regs(code);
+ retn(code);
+
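+	//native_addr: map the 68K address in scratch1 to its native code address, translating code on demand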
+ opts->native_addr = code->cur;
+ call(code, opts->gen.save_context);
+ push_r(code, opts->gen.context_reg);
+ call_args(code, (code_ptr)get_native_address_trans, 2, opts->gen.context_reg, opts->gen.scratch1);
+ mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
+ pop_r(code, opts->gen.context_reg);
+ call(code, opts->gen.load_context);
+ retn(code);
+
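+	//native_addr_and_sync: same as native_addr, but synchronizes the other components first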
+ opts->native_addr_and_sync = code->cur;
+ call(code, opts->gen.save_context);
+ push_r(code, opts->gen.scratch1);
+
+ xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
+ call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+ pop_r(code, RSI); //restore saved address from opts->gen.scratch1
+ push_r(code, RAX); //save context pointer for later
+ call_args(code, (code_ptr)get_native_address_trans, 2, RAX, RSI);
+ mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); //move result to scratch reg
+ pop_r(code, opts->gen.context_reg);
+ call(code, opts->gen.load_context);
+ retn(code);
+
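+	//handle_cycle_limit: invoked when the cycle count reaches the target; syncs the other components once sync_cycle has been reached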
+ opts->gen.handle_cycle_limit = code->cur;
+ cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D);
+ code_ptr skip_sync = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ opts->do_sync = code->cur;
+ push_r(code, opts->gen.scratch1);
+ push_r(code, opts->gen.scratch2);
+ call(code, opts->gen.save_context);
+ xor_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_D);
+ call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+ mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
+ call(code, opts->gen.load_context);
+ pop_r(code, opts->gen.scratch2);
+ pop_r(code, opts->gen.scratch1);
+ *skip_sync = code->cur - (skip_sync+1);
+ retn(code);
+
+ opts->gen.handle_code_write = (code_ptr)m68k_handle_code_write;
+
+ opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16, NULL);
+ opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8, NULL);
+ opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16, NULL);
+ opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8, NULL);
+
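+	//32-bit reads are composed from two 16-bit reads, high word (lower address) first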
+ opts->read_32 = code->cur;
+ push_r(code, opts->gen.scratch1);
+ call(code, opts->read_16);
+ mov_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_W);
+ pop_r(code, opts->gen.scratch1);
+ push_r(code, opts->gen.scratch2);
+ add_ir(code, 2, opts->gen.scratch1, SZ_D);
+ call(code, opts->read_16);
+ pop_r(code, opts->gen.scratch2);
+ movzx_rr(code, opts->gen.scratch1, opts->gen.scratch1, SZ_W, SZ_D);
+ shl_ir(code, 16, opts->gen.scratch2, SZ_D);
+ or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_D);
+ retn(code);
+
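+	//32-bit writes are likewise split into two 16-bit writes; the two variants differ only in which word is written first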
+ opts->write_32_lowfirst = code->cur;
+ push_r(code, opts->gen.scratch2);
+ push_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch2, SZ_D);
+ call(code, opts->write_16);
+ pop_r(code, opts->gen.scratch1);
+ pop_r(code, opts->gen.scratch2);
+ shr_ir(code, 16, opts->gen.scratch1, SZ_D);
+ jmp(code, opts->write_16);
+
+ opts->write_32_highfirst = code->cur;
+ push_r(code, opts->gen.scratch1);
+ push_r(code, opts->gen.scratch2);
+ shr_ir(code, 16, opts->gen.scratch1, SZ_D);
+ call(code, opts->write_16);
+ pop_r(code, opts->gen.scratch2);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, 2, opts->gen.scratch2, SZ_D);
+ jmp(code, opts->write_16);
+
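+	//get_sr: assemble the full status register in scratch1 from the system byte and the individual flag storage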
+ opts->get_sr = code->cur;
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, status), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 8, opts->gen.scratch1, SZ_W);
+ if (opts->flag_regs[FLAG_X] >= 0) {
+ mov_rr(code, opts->flag_regs[FLAG_X], opts->gen.scratch1, SZ_B);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags);
+ if (offset) {
+ mov_rdispr(code, opts->gen.context_reg, offset, opts->gen.scratch1, SZ_B);
+ } else {
+ mov_rindr(code, opts->gen.context_reg, opts->gen.scratch1, SZ_B);
+ }
+ }
+ for (int flag = FLAG_N; flag <= FLAG_C; flag++)
+ {
+ shl_ir(code, 1, opts->gen.scratch1, SZ_B);
+ if (opts->flag_regs[flag] >= 0) {
+ or_rr(code, opts->flag_regs[flag], opts->gen.scratch1, SZ_B);
+ } else {
+ or_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, flags) + flag, opts->gen.scratch1, SZ_B);
+ }
+ }
+ retn(code);
+
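+	//set_sr: unpack the value in scratch1 into the individual flags and the system byte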
+ opts->set_sr = code->cur;
+ for (int flag = FLAG_C; flag >= FLAG_X; flag--)
+ {
+ rcr_ir(code, 1, opts->gen.scratch1, SZ_B);
+ if (opts->flag_regs[flag] >= 0) {
+ setcc_r(code, CC_C, opts->flag_regs[flag]);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, offset);
+ } else {
+ setcc_rind(code, CC_C, opts->gen.context_reg);
+ }
+ }
+ }
+ shr_ir(code, 8, opts->gen.scratch1, SZ_W);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ retn(code);
+
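+	//set_ccr: same as set_sr, but only the condition code flags are updated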
+ opts->set_ccr = code->cur;
+ for (int flag = FLAG_C; flag >= FLAG_X; flag--)
+ {
+ rcr_ir(code, 1, opts->gen.scratch1, SZ_B);
+ if (opts->flag_regs[flag] >= 0) {
+ setcc_r(code, CC_C, opts->flag_regs[flag]);
+ } else {
+ int8_t offset = offsetof(m68k_context, flags) + flag;
+ if (offset) {
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, offset);
+ } else {
+ setcc_rind(code, CC_C, opts->gen.context_reg);
+ }
+ }
+ }
+ retn(code);
+
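+	//handle_cycle_limit_int: sync with the other components and, if an interrupt is pending, stack the PC and
+	//status register, update the interrupt mask and jump to the translated code for the interrupt vector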
+ opts->gen.handle_cycle_limit_int = code->cur;
+ cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_cycle), opts->gen.cycles, SZ_D);
+ code_ptr do_int = code->cur + 1;
+ jcc(code, CC_NC, code->cur + 2);
+ cmp_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.cycles, SZ_D);
+ skip_sync = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ call(code, opts->gen.save_context);
+ call_args_abi(code, (code_ptr)sync_components, 2, opts->gen.context_reg, opts->gen.scratch1);
+ mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
+ jmp(code, opts->gen.load_context);
+ *skip_sync = code->cur - (skip_sync+1);
+ retn(code);
+ *do_int = code->cur - (do_int+1);
+ //set target cycle to sync cycle
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, sync_cycle), opts->gen.limit, SZ_D);
+ //swap USP and SSP if not already in supervisor mode
+ bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ code_ptr already_supervisor = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ swap_ssp_usp(opts);
+ *already_supervisor = code->cur - (already_supervisor+1);
+ //save PC
+ subi_areg(opts, 4, 7);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(code, opts->write_32_lowfirst);
+ //save status register
+ subi_areg(opts, 2, 7);
+ call(code, opts->get_sr);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(code, opts->write_16);
+ //update status register
+ and_irdisp(code, 0xF8, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_B);
+ or_ir(code, 0x20, opts->gen.scratch1, SZ_B);
+ or_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ //calculate interrupt vector address
+ mov_rdispr(code, opts->gen.context_reg, offsetof(m68k_context, int_num), opts->gen.scratch1, SZ_D);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(m68k_context, int_ack), SZ_W);
+ shl_ir(code, 2, opts->gen.scratch1, SZ_D);
+ add_ir(code, 0x60, opts->gen.scratch1, SZ_D);
+ call(code, opts->read_32);
+ call(code, opts->native_addr_and_sync);
+ cycles(&opts->gen, 24);
+ //discard function return address
+ pop_r(code, opts->gen.scratch2);
+ jmp_r(code, opts->gen.scratch1);
+
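+	//trap: common exception entry; expects the PC to stack in scratch1 and the vector number in scratch2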
+ opts->trap = code->cur;
+ push_r(code, opts->gen.scratch2);
+ //swap USP and SSP if not already in supervisor mode
+ bt_irdisp(code, 5, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ already_supervisor = code->cur + 1;
+ jcc(code, CC_C, code->cur + 2);
+ swap_ssp_usp(opts);
+ *already_supervisor = code->cur - (already_supervisor+1);
+ //save PC
+ subi_areg(opts, 4, 7);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(code, opts->write_32_lowfirst);
+ //save status register
+ subi_areg(opts, 2, 7);
+ call(code, opts->get_sr);
+ areg_to_native(opts, 7, opts->gen.scratch2);
+ call(code, opts->write_16);
+ //set supervisor bit
+ or_irdisp(code, 0x20, opts->gen.context_reg, offsetof(m68k_context, status), SZ_B);
+ //calculate vector address
+ pop_r(code, opts->gen.scratch1);
+ shl_ir(code, 2, opts->gen.scratch1, SZ_D);
+ call(code, opts->read_32);
+ call(code, opts->native_addr_and_sync);
+ cycles(&opts->gen, 18);
+ jmp_r(code, opts->gen.scratch1);
+}
diff --git a/m68k_internal.h b/m68k_internal.h
new file mode 100644
index 0000000..a556505
--- /dev/null
+++ b/m68k_internal.h
@@ -0,0 +1,116 @@
+/*
+ Copyright 2014 Michael Pavone
+ This file is part of BlastEm.
+ BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
+*/
+#ifndef M68K_INTERNAL_H_
+#define M68K_INTERNAL_H_
+
+#include "68kinst.h"
+
+//functions implemented in host CPU specific file
+void translate_out_of_bounds(code_info *code);
+void areg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void dreg_to_native(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void areg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void dreg_to_native_sx(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void native_to_areg(m68k_options *opts, uint8_t native_reg, uint8_t reg);
+void native_to_dreg(m68k_options *opts, uint8_t native_reg, uint8_t reg);
+void ldi_areg(m68k_options *opts, int32_t value, uint8_t reg);
+void ldi_native(m68k_options *opts, int32_t value, uint8_t reg);
+void addi_native(m68k_options *opts, int32_t value, uint8_t reg);
+void subi_native(m68k_options *opts, int32_t value, uint8_t reg);
+void push_native(m68k_options *opts, uint8_t reg);
+void pop_native(m68k_options *opts, uint8_t reg);
+void sign_extend16_native(m68k_options *opts, uint8_t reg);
+void addi_areg(m68k_options *opts, int32_t val, uint8_t reg);
+void subi_areg(m68k_options *opts, int32_t val, uint8_t reg);
+void add_areg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void add_dreg_native(m68k_options *opts, uint8_t reg, uint8_t native_reg);
+void calc_areg_displace(m68k_options *opts, m68k_op_info *op, uint8_t native_reg);
+void calc_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg);
+void calc_areg_index_disp8(m68k_options *opts, m68k_op_info *op, uint8_t native_reg);
+void nop_fill_or_jmp_next(code_info *code, code_ptr old_end, code_ptr next_inst);
+
+//functions implemented in m68k_core.c
+int8_t native_reg(m68k_op_info * op, m68k_options * opts);
+size_t dreg_offset(uint8_t reg);
+size_t areg_offset(uint8_t reg);
+size_t reg_offset(m68k_op_info *op);
+void translate_m68k_op(m68kinst * inst, host_ea * ea, m68k_options * opts, uint8_t dst);
+void print_regs_exit(m68k_context * context);
+void m68k_read_size(m68k_options *opts, uint8_t size);
+void m68k_write_size(m68k_options *opts, uint8_t size);
+void push_const(m68k_options *opts, int32_t value);
+void jump_m68k_abs(m68k_options * opts, uint32_t address);
+void swap_ssp_usp(m68k_options * opts);
+code_ptr get_native_address(native_map_slot * native_code_map, uint32_t address);
+void map_native_address(m68k_context * context, uint32_t address, code_ptr native_addr, uint8_t size, uint8_t native_size);
+uint8_t get_native_inst_size(m68k_options * opts, uint32_t address);
+uint8_t m68k_is_terminal(m68kinst * inst);
+void m68k_handle_deferred(m68k_context * context);
+code_ptr get_native_address_trans(m68k_context * context, uint32_t address);
+void * m68k_retranslate_inst(uint32_t address, m68k_context * context);
+
+//individual instructions
+void translate_m68k_bcc(m68k_options * opts, m68kinst * inst);
+void translate_m68k_scc(m68k_options * opts, m68kinst * inst);
+void translate_m68k_dbcc(m68k_options * opts, m68kinst * inst);
+void translate_m68k_rtr(m68k_options *opts, m68kinst * inst);
+void translate_m68k_trap(m68k_options *opts, m68kinst *inst);
+void translate_m68k_move(m68k_options * opts, m68kinst * inst);
+void translate_m68k_movep(m68k_options * opts, m68kinst * inst);
+void translate_m68k_arith(m68k_options *opts, m68kinst * inst, uint32_t flag_mask, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_unary(m68k_options *opts, m68kinst *inst, uint32_t flag_mask, host_ea *dst_op);
+void translate_m68k_invalid(m68k_options *opts, m68kinst *inst);
+void translate_m68k_cmp(m68k_options * opts, m68kinst * inst);
+void translate_m68k_clr(m68k_options * opts, m68kinst * inst);
+void translate_m68k_ext(m68k_options * opts, m68kinst * inst);
+void translate_m68k_abcd_sbcd(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_sl(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_asr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_lsr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_bit(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_chk(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_div(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_exg(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_mul(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_negx(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_rot(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_illegal(m68k_options *opts, m68kinst *inst);
+void translate_m68k_andi_ori_ccr_sr(m68k_options *opts, m68kinst *inst);
+void translate_m68k_eori_ccr_sr(m68k_options *opts, m68kinst *inst);
+void translate_m68k_move_ccr_sr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_stop(m68k_options *opts, m68kinst *inst);
+void translate_m68k_move_from_sr(m68k_options *opts, m68kinst *inst, host_ea *src_op, host_ea *dst_op);
+void translate_m68k_reset(m68k_options *opts, m68kinst *inst);
+void translate_m68k_rte(m68k_options *opts, m68kinst *inst);
+
+//flag update bits
+#define X0 0x0001
+#define X1 0x0002
+#define X 0x0004
+#define N0 0x0008
+#define N1 0x0010
+#define N 0x0020
+#define Z0 0x0040
+#define Z1 0x0080
+#define Z 0x0100
+#define V0 0x0200
+#define V1 0x0400
+#define V 0x0800
+#define C0 0x1000
+#define C1 0x2000
+#define C 0x4000
+
+#define BUS 4
+#define PREDEC_PENALTY 2
+extern char disasm_buf[1024];
+
+m68k_context * sync_components(m68k_context * context, uint32_t address);
+
+void m68k_invalid();
+void bcd_add();
+void bcd_sub();
+
+#endif //M68K_INTERNAL_H_
diff --git a/m68k_to_x86.c b/m68k_to_x86.c
deleted file mode 100644
index b46a5c3..0000000
--- a/m68k_to_x86.c
+++ /dev/null
@@ -1,5028 +0,0 @@
-/*
- Copyright 2013 Michael Pavone
- This file is part of BlastEm.
- BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
-*/
-#include "gen_x86.h"
-#include "m68k_to_x86.h"
-#include "68kinst.h"
-#include "mem.h"
-#include "backend.h"
-#include <stdio.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-
-#define BUS 4
-#define PREDEC_PENALTY 2
-
-#define CYCLES RAX
-#define LIMIT RBP
-#define CONTEXT RSI
-#define SCRATCH1 RCX
-
-#ifdef X86_64
-#define SCRATCH2 RDI
-#else
-#define SCRATCH2 RBX
-#endif
-
-enum {
- FLAG_X,
- FLAG_N,
- FLAG_Z,
- FLAG_V,
- FLAG_C
-};
-
-char disasm_buf[1024];
-
-m68k_context * sync_components(m68k_context * context, uint32_t address);
-
-extern void bcd_add() asm("bcd_add");
-extern void bcd_sub() asm("bcd_sub");
-
-void m68k_invalid(uint32_t address, m68k_context * context)
-{
- printf("Invalid instruction at %X\n", address);
- exit(1);
-}
-
-code_ptr cycles(code_ptr dst, uint32_t num)
-{
- dst = add_ir(dst, num, CYCLES, SZ_D);
- return dst;
-}
-
-code_ptr check_cycles_int(code_ptr dst, uint32_t address, x86_68k_options * opts)
-{
- dst = cmp_rr(dst, CYCLES, LIMIT, SZ_D);
- code_ptr jmp_off = dst+1;
- dst = jcc(dst, CC_NC, dst + 7);
- dst = mov_ir(dst, address, SCRATCH1, SZ_D);
- dst = call(dst, opts->gen.handle_cycle_limit_int);
- *jmp_off = dst - (jmp_off+1);
- return dst;
-}
-
-code_ptr check_cycles(code_ptr dst, cpu_options * opts)
-{
- dst = cmp_rr(dst, CYCLES, LIMIT, SZ_D);
- code_ptr jmp_off = dst+1;
- dst = jcc(dst, CC_NC, dst + 7);
- dst = call(dst, opts->handle_cycle_limit);
- *jmp_off = dst - (jmp_off+1);
- return dst;
-}
-
-code_ptr set_flag(code_ptr dst, uint8_t val, uint8_t flag, x86_68k_options * opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = mov_ir(dst, val, opts->flag_regs[flag], SZ_B);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = mov_irdisp8(dst, val, CONTEXT, offset, SZ_B);
- } else {
- dst = mov_irind(dst, val, CONTEXT, SZ_B);
- }
- }
-
- return dst;
-}
-
-code_ptr set_flag_cond(code_ptr dst, uint8_t cond, uint8_t flag, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = setcc_r(dst, cond, opts->flag_regs[flag]);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = setcc_rdisp8(dst, cond, CONTEXT, offset);
- } else {
- dst = setcc_rind(dst, cond, CONTEXT);
- }
- }
-
- return dst;
-}
-
-code_ptr check_flag(code_ptr dst, uint8_t flag, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = cmp_ir(dst, 0, opts->flag_regs[flag], SZ_B);
- } else {
- dst = cmp_irdisp8(dst, 0, CONTEXT, offsetof(m68k_context, flags) + flag, SZ_B);
- }
- return dst;
-}
-
-code_ptr flag_to_reg(code_ptr dst, uint8_t flag, uint8_t reg, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = mov_rr(dst, opts->flag_regs[flag], reg, SZ_B);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = mov_rdisp8r(dst, CONTEXT, offset, reg, SZ_B);
- } else {
- dst = mov_rindr(dst, CONTEXT, reg, SZ_B);
- }
- }
- return dst;
-}
-
-code_ptr reg_to_flag(code_ptr dst, uint8_t flag, uint8_t reg, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = mov_rr(dst, reg, opts->flag_regs[flag], SZ_B);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = mov_rrdisp8(dst, reg, CONTEXT, offset, SZ_B);
- } else {
- dst = mov_rrind(dst, reg, CONTEXT, SZ_B);
- }
- }
- return dst;
-}
-
-code_ptr flag_to_flag(code_ptr dst, uint8_t flag1, uint8_t flag2, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
- dst = mov_rr(dst, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
- } else if(opts->flag_regs[flag1] >= 0) {
- dst = mov_rrdisp8(dst, opts->flag_regs[flag1], CONTEXT, offsetof(m68k_context, flags) + flag2, SZ_B);
- } else if (opts->flag_regs[flag2] >= 0) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag1, opts->flag_regs[flag2], SZ_B);
- } else {
- dst = push_r(dst, SCRATCH1);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag1, SCRATCH1, SZ_B);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, flags) + flag2, SZ_B);
- dst = pop_r(dst, SCRATCH1);
- }
- return dst;
-}
-
-code_ptr flag_to_carry(code_ptr dst, uint8_t flag, x86_68k_options * opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = bt_ir(dst, 0, opts->flag_regs[flag], SZ_B);
- } else {
- dst = bt_irdisp8(dst, 0, CONTEXT, offsetof(m68k_context, flags) + flag, SZ_B);
- }
- return dst;
-}
-
-code_ptr or_flag_to_reg(code_ptr dst, uint8_t flag, uint8_t reg, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = or_rr(dst, opts->flag_regs[flag], reg, SZ_B);
- } else {
- dst = or_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag, reg, SZ_B);
- }
- return dst;
-}
-
-code_ptr xor_flag_to_reg(code_ptr dst, uint8_t flag, uint8_t reg, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = xor_rr(dst, opts->flag_regs[flag], reg, SZ_B);
- } else {
- dst = xor_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag, reg, SZ_B);
- }
- return dst;
-}
-
-code_ptr xor_flag(code_ptr dst, uint8_t val, uint8_t flag, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag] >= 0) {
- dst = xor_ir(dst, val, opts->flag_regs[flag], SZ_B);
- } else {
- dst = xor_irdisp8(dst, val, CONTEXT, offsetof(m68k_context, flags) + flag, SZ_B);
- }
- return dst;
-}
-
-code_ptr cmp_flags(code_ptr dst, uint8_t flag1, uint8_t flag2, x86_68k_options *opts)
-{
- if (opts->flag_regs[flag1] >= 0 && opts->flag_regs[flag2] >= 0) {
- dst = cmp_rr(dst, opts->flag_regs[flag1], opts->flag_regs[flag2], SZ_B);
- } else if(opts->flag_regs[flag1] >= 0 || opts->flag_regs[flag2] >= 0) {
- if (opts->flag_regs[flag2] >= 0) {
- uint8_t tmp = flag1;
- flag1 = flag2;
- flag2 = tmp;
- }
- dst = cmp_rrdisp8(dst, opts->flag_regs[flag1], CONTEXT, offsetof(m68k_context, flags) + flag2, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag1, SCRATCH1, SZ_B);
- dst = cmp_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, flags) + flag2, SZ_B);
- }
- return dst;
-}
-
-int8_t native_reg(m68k_op_info * op, x86_68k_options * opts)
-{
- if (op->addr_mode == MODE_REG) {
- return opts->dregs[op->params.regs.pri];
- }
- if (op->addr_mode == MODE_AREG) {
- return opts->aregs[op->params.regs.pri];
- }
- return -1;
-}
-
-//must be called with an m68k_op_info that uses a register
-size_t reg_offset(m68k_op_info *op)
-{
- if (op->addr_mode == MODE_REG) {
- return offsetof(m68k_context, dregs) + sizeof(uint32_t) * op->params.regs.pri;
- }
- return offsetof(m68k_context, aregs) + sizeof(uint32_t) * op->params.regs.pri;
-}
-
-void print_regs_exit(m68k_context * context)
-{
- printf("XNZVC\n%d%d%d%d%d\n", context->flags[0], context->flags[1], context->flags[2], context->flags[3], context->flags[4]);
- for (int i = 0; i < 8; i++) {
- printf("d%d: %X\n", i, context->dregs[i]);
- }
- for (int i = 0; i < 8; i++) {
- printf("a%d: %X\n", i, context->aregs[i]);
- }
- exit(0);
-}
-
-code_ptr translate_m68k_src(m68kinst * inst, x86_ea * ea, code_ptr out, x86_68k_options * opts)
-{
- int8_t reg = native_reg(&(inst->src), opts);
- uint8_t sec_reg;
- int32_t dec_amount,inc_amount;
- if (reg >= 0) {
- ea->mode = MODE_REG_DIRECT;
- if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
- out = movsx_rr(out, reg, SCRATCH1, SZ_W, SZ_D);
- ea->base = SCRATCH1;
- } else {
- ea->base = reg;
- }
- return out;
- }
- switch (inst->src.addr_mode)
- {
- case MODE_REG:
- case MODE_AREG:
- //We only get one memory parameter, so if the dst operand is a register in memory,
- //we need to copy this to a temp register first
- reg = native_reg(&(inst->dst), opts);
- if (reg >= 0 || inst->dst.addr_mode == MODE_UNUSED || !(inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG)
- || inst->op == M68K_EXG) {
-
- ea->mode = MODE_REG_DISPLACE8;
- ea->base = CONTEXT;
- ea->disp = reg_offset(&(inst->src));
- } else {
- if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
- out = movsx_rdisp8r(out, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_W, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, inst->extra.size);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- //we're explicitly handling the areg dest here, so we exit immediately
- return out;
- }
- break;
- case MODE_AREG_PREDEC:
- dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->src.params.regs.pri == 7 ? 2 :1));
- out = cycles(out, PREDEC_PENALTY);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- out = sub_ir(out, dec_amount, opts->aregs[inst->src.params.regs.pri], SZ_D);
- } else {
- out = sub_irdisp8(out, dec_amount, CONTEXT, reg_offset(&(inst->src)), SZ_D);
- }
- case MODE_AREG_INDIRECT:
- case MODE_AREG_POSTINC:
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
-
- if (inst->src.addr_mode == MODE_AREG_POSTINC) {
- inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->src.params.regs.pri == 7 ? 2 : 1));
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- out = add_ir(out, inc_amount, opts->aregs[inst->src.params.regs.pri], SZ_D);
- } else {
- out = add_irdisp8(out, inc_amount, CONTEXT, reg_offset(&(inst->src)), SZ_D);
- }
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) ? SCRATCH2 : SCRATCH1;
- break;
- case MODE_AREG_DISPLACE:
- out = cycles(out, BUS);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- out = add_ir(out, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_AREG_INDEX_DISP8:
- out = cycles(out, 6);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = add_rr(out, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = add_rr(out, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- out = add_rr(out, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- out = add_ir(out, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_PC_DISPLACE:
- out = cycles(out, BUS);
- out = mov_ir(out, inst->src.params.regs.displacement + inst->address+2, SCRATCH1, SZ_D);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_PC_INDEX_DISP8:
- out = cycles(out, 6);
- out = mov_ir(out, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = add_rr(out, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = add_rr(out, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- out = add_rr(out, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- out = add_ir(out, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- if (inst->src.addr_mode == MODE_ABSOLUTE) {
- out = cycles(out, BUS*2);
- } else {
- out = cycles(out, BUS);
- }
- out = mov_ir(out, inst->src.params.immed, SCRATCH1, SZ_D);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_IMMEDIATE:
- case MODE_IMMEDIATE_WORD:
- if (inst->variant != VAR_QUICK) {
- out = cycles(out, (inst->extra.size == OPSIZE_LONG && inst->src.addr_mode == MODE_IMMEDIATE) ? BUS*2 : BUS);
- }
- ea->mode = MODE_IMMED;
- ea->disp = inst->src.params.immed;
- if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD && ea->disp & 0x8000) {
- ea->disp |= 0xFFFF0000;
- }
- return out;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (src)\n", inst->address, disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- if (inst->dst.addr_mode == MODE_AREG && inst->extra.size == OPSIZE_WORD) {
- if (ea->mode == MODE_REG_DIRECT) {
- out = movsx_rr(out, ea->base, SCRATCH1, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, ea->base, ea->disp, SCRATCH1, SZ_W, SZ_D);
- ea->mode = MODE_REG_DIRECT;
- }
- ea->base = SCRATCH1;
- }
- return out;
-}
-
-code_ptr translate_m68k_dst(m68kinst * inst, x86_ea * ea, code_ptr out, x86_68k_options * opts, uint8_t fake_read)
-{
- int8_t reg = native_reg(&(inst->dst), opts), sec_reg;
- int32_t dec_amount, inc_amount;
- if (reg >= 0) {
- ea->mode = MODE_REG_DIRECT;
- ea->base = reg;
- return out;
- }
- switch (inst->dst.addr_mode)
- {
- case MODE_REG:
- case MODE_AREG:
- ea->mode = MODE_REG_DISPLACE8;
- ea->base = CONTEXT;
- ea->disp = reg_offset(&(inst->dst));
- break;
- case MODE_AREG_PREDEC:
- if (inst->src.addr_mode == MODE_AREG_PREDEC) {
- out = push_r(out, SCRATCH1);
- }
- dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = sub_ir(out, dec_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
- } else {
- out = sub_irdisp8(out, dec_amount, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- case MODE_AREG_INDIRECT:
- case MODE_AREG_POSTINC:
- if (fake_read) {
- out = cycles(out, inst->extra.size == OPSIZE_LONG ? 8 : 4);
- } else {
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->dst.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->dst)), SCRATCH1, SZ_D);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- }
- if (inst->src.addr_mode == MODE_AREG_PREDEC) {
- //restore src operand to SCRATCH2
- out =pop_r(out, SCRATCH2);
- } else {
- //save reg value in SCRATCH2 so we can use it to save the result in memory later
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- }
-
- if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
- inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = add_ir(out, inc_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
- } else {
- out = add_irdisp8(out, inc_amount, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_AREG_DISPLACE:
- out = cycles(out, fake_read ? BUS+(inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : BUS);
- reg = fake_read ? SCRATCH2 : SCRATCH1;
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->dst.params.regs.pri], reg, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->dst)), reg, SZ_D);
- }
- out = add_ir(out, inst->dst.params.regs.displacement, reg, SZ_D);
- if (!fake_read) {
- out = push_r(out, SCRATCH1);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- out = pop_r(out, SCRATCH2);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_AREG_INDEX_DISP8:
- out = cycles(out, fake_read ? (6 + inst->extra.size == OPSIZE_LONG ? 8 : 4) : 6);
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->dst.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->dst)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = add_rr(out, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = add_rr(out, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- out = add_rr(out, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->dst.params.regs.displacement) {
- out = add_ir(out, inst->dst.params.regs.displacement, SCRATCH1, SZ_D);
- }
- if (fake_read) {
- out = mov_rr(out, SCRATCH1, SCRATCH2, SZ_D);
- } else {
- out = push_r(out, SCRATCH1);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- out = pop_r(out, SCRATCH2);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_PC_DISPLACE:
- out = cycles(out, fake_read ? BUS+(inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : BUS);
- out = mov_ir(out, inst->dst.params.regs.displacement + inst->address+2, fake_read ? SCRATCH2 : SCRATCH1, SZ_D);
- if (!fake_read) {
- out = push_r(out, SCRATCH1);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- out = pop_r(out, SCRATCH2);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_PC_INDEX_DISP8:
- out = cycles(out, fake_read ? (6 + inst->extra.size == OPSIZE_LONG ? 8 : 4) : 6);
- out = mov_ir(out, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = add_rr(out, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = add_rr(out, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- out = add_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- out = movsx_rr(out, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- out = movsx_rdisp8r(out, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- out = add_rr(out, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->dst.params.regs.displacement) {
- out = add_ir(out, inst->dst.params.regs.displacement, SCRATCH1, SZ_D);
- }
- if (fake_read) {
- out = mov_rr(out, SCRATCH1, SCRATCH2, SZ_D);
- } else {
- out = push_r(out, SCRATCH1);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- out = pop_r(out, SCRATCH2);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- //Add cycles for reading address from instruction stream
- out = cycles(out, (inst->dst.addr_mode == MODE_ABSOLUTE ? BUS*2 : BUS) + (fake_read ? (inst->extra.size == OPSIZE_LONG ? BUS*2 : BUS) : 0));
- out = mov_ir(out, inst->dst.params.immed, fake_read ? SCRATCH2 : SCRATCH1, SZ_D);
- if (!fake_read) {
- out = push_r(out, SCRATCH1);
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->read_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->read_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->read_32);
- break;
- }
- out = pop_r(out, SCRATCH2);
- }
- ea->mode = MODE_REG_DIRECT;
- ea->base = SCRATCH1;
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
- exit(1);
- }
- return out;
-}
-
-code_ptr m68k_save_result(m68kinst * inst, code_ptr out, x86_68k_options * opts)
-{
- if (inst->dst.addr_mode != MODE_REG && inst->dst.addr_mode != MODE_AREG) {
- if (inst->dst.addr_mode == MODE_AREG_PREDEC && inst->src.addr_mode == MODE_AREG_PREDEC && inst->op != M68K_MOVE) {
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- out = mov_rr(out, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- out = mov_rdisp8r(out, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- out = call(out, opts->write_8);
- break;
- case OPSIZE_WORD:
- out = call(out, opts->write_16);
- break;
- case OPSIZE_LONG:
- out = call(out, opts->write_32_lowfirst);
- break;
- }
- }
- return out;
-}
-
-code_ptr get_native_address(native_map_slot * native_code_map, uint32_t address)
-{
- address &= 0xFFFFFF;
- address /= 2;
- uint32_t chunk = address / NATIVE_CHUNK_SIZE;
- if (!native_code_map[chunk].base) {
- return NULL;
- }
- uint32_t offset = address % NATIVE_CHUNK_SIZE;
- if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET || native_code_map[chunk].offsets[offset] == EXTENSION_WORD) {
- return NULL;
- }
- return native_code_map[chunk].base + native_code_map[chunk].offsets[offset];
-}
-
-code_ptr get_native_from_context(m68k_context * context, uint32_t address)
-{
- return get_native_address(context->native_code_map, address);
-}
-
-uint32_t get_instruction_start(native_map_slot * native_code_map, uint32_t address)
-{
- address &= 0xFFFFFF;
- address /= 2;
- uint32_t chunk = address / NATIVE_CHUNK_SIZE;
- if (!native_code_map[chunk].base) {
- return 0;
- }
- uint32_t offset = address % NATIVE_CHUNK_SIZE;
- if (native_code_map[chunk].offsets[offset] == INVALID_OFFSET) {
- return 0;
- }
- while (native_code_map[chunk].offsets[offset] == EXTENSION_WORD) {
- --address;
- chunk = address / NATIVE_CHUNK_SIZE;
- offset = address % NATIVE_CHUNK_SIZE;
- }
- return address*2;
-}
-
-void map_native_address(m68k_context * context, uint32_t address, code_ptr native_addr, uint8_t size, uint8_t native_size)
-{
- native_map_slot * native_code_map = context->native_code_map;
- x86_68k_options * opts = context->options;
- address &= 0xFFFFFF;
- if (address > 0xE00000) {
- context->ram_code_flags[(address & 0xC000) >> 14] |= 1 << ((address & 0x3800) >> 11);
- if (((address & 0x3FFF) + size) & 0xC000) {
- context->ram_code_flags[((address+size) & 0xC000) >> 14] |= 1 << (((address+size) & 0x3800) >> 11);
- }
- uint32_t slot = (address & 0xFFFF)/1024;
- if (!opts->gen.ram_inst_sizes[slot]) {
- opts->gen.ram_inst_sizes[slot] = malloc(sizeof(uint8_t) * 512);
- }
- opts->gen.ram_inst_sizes[slot][((address & 0xFFFF)/2)%512] = native_size;
- }
- address/= 2;
- uint32_t chunk = address / NATIVE_CHUNK_SIZE;
- if (!native_code_map[chunk].base) {
- native_code_map[chunk].base = native_addr;
- native_code_map[chunk].offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE);
- memset(native_code_map[chunk].offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE);
- }
- uint32_t offset = address % NATIVE_CHUNK_SIZE;
- native_code_map[chunk].offsets[offset] = native_addr-native_code_map[chunk].base;
- for(address++,size-=2; size; address++,size-=2) {
- chunk = address / NATIVE_CHUNK_SIZE;
- offset = address % NATIVE_CHUNK_SIZE;
- if (!native_code_map[chunk].base) {
- native_code_map[chunk].base = native_addr;
- native_code_map[chunk].offsets = malloc(sizeof(int32_t) * NATIVE_CHUNK_SIZE);
- memset(native_code_map[chunk].offsets, 0xFF, sizeof(int32_t) * NATIVE_CHUNK_SIZE);
- }
- native_code_map[chunk].offsets[offset] = EXTENSION_WORD;
- }
-}
-
-uint8_t get_native_inst_size(x86_68k_options * opts, uint32_t address)
-{
- if (address < 0xE00000) {
- return 0;
- }
- uint32_t slot = (address & 0xFFFF)/1024;
- return opts->gen.ram_inst_sizes[slot][((address & 0xFFFF)/2)%512];
-}
-
-code_ptr translate_m68k_move(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int8_t reg, flags_reg, sec_reg;
- uint8_t dir = 0;
- int32_t offset;
- int32_t inc_amount, dec_amount;
- x86_ea src;
- dst = translate_m68k_src(inst, &src, dst, opts);
- reg = native_reg(&(inst->dst), opts);
- if (inst->dst.addr_mode != MODE_AREG) {
- //update statically set flags
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = set_flag(dst, 0, FLAG_C, opts);
- }
-
- if (inst->dst.addr_mode != MODE_AREG) {
- if (src.mode == MODE_REG_DIRECT) {
- flags_reg = src.base;
- } else {
- if (reg >= 0) {
- flags_reg = reg;
- } else {
- if(src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- src.mode = MODE_REG_DIRECT;
- flags_reg = src.base = SCRATCH1;
- }
- }
- }
- uint8_t size = inst->extra.size;
- switch(inst->dst.addr_mode)
- {
- case MODE_AREG:
- size = OPSIZE_LONG;
- case MODE_REG:
- if (reg >= 0) {
- if (src.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src.base, reg, size);
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, reg, size);
- } else {
- dst = mov_ir(dst, src.disp, reg, size);
- }
- } else if(src.mode == MODE_REG_DIRECT) {
- dst = mov_rrdisp8(dst, src.base, CONTEXT, reg_offset(&(inst->dst)), size);
- } else {
- dst = mov_irdisp8(dst, src.disp, CONTEXT, reg_offset(&(inst->dst)), size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- break;
- case MODE_AREG_PREDEC:
- dec_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = sub_ir(dst, dec_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
- } else {
- dst = sub_irdisp8(dst, dec_amount, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- case MODE_AREG_INDIRECT:
- case MODE_AREG_POSTINC:
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
- inc_amount = inst->extra.size == OPSIZE_WORD ? 2 : (inst->extra.size == OPSIZE_LONG ? 4 : (inst->dst.params.regs.pri == 7 ? 2 : 1));
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = add_ir(dst, inc_amount, opts->aregs[inst->dst.params.regs.pri], SZ_D);
- } else {
- dst = add_irdisp8(dst, inc_amount, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- }
- break;
- case MODE_AREG_DISPLACE:
- dst = cycles(dst, BUS);
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- break;
- case MODE_AREG_INDEX_DISP8:
- dst = cycles(dst, 6);//TODO: Check to make sure this is correct
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- }
- } else {
- if (src.base == SCRATCH1) {
- dst = push_r(dst, SCRATCH1);
- }
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_D);
- if (src.base == SCRATCH1) {
- dst = pop_r(dst, SCRATCH1);
- }
- }
- if (inst->dst.params.regs.displacement) {
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- }
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- break;
- case MODE_PC_DISPLACE:
- dst = cycles(dst, BUS);
- dst = mov_ir(dst, inst->dst.params.regs.displacement + inst->address+2, SCRATCH2, SZ_D);
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- break;
- case MODE_PC_INDEX_DISP8:
- dst = cycles(dst, 6);//TODO: Check to make sure this is correct
- dst = mov_ir(dst, inst->address, SCRATCH2, SZ_D);
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- }
- } else {
- if (src.base == SCRATCH1) {
- dst = push_r(dst, SCRATCH1);
- }
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_D);
- if (src.base == SCRATCH1) {
- dst = pop_r(dst, SCRATCH1);
- }
- }
- if (inst->dst.params.regs.displacement) {
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- }
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- if (src.mode == MODE_REG_DIRECT) {
- if (src.base != SCRATCH1) {
- dst = mov_rr(dst, src.base, SCRATCH1, inst->extra.size);
- }
- } else if (src.mode == MODE_REG_DISPLACE8) {
- dst = mov_rdisp8r(dst, src.base, src.disp, SCRATCH1, inst->extra.size);
- } else {
- dst = mov_ir(dst, src.disp, SCRATCH1, inst->extra.size);
- }
- if (inst->dst.addr_mode == MODE_ABSOLUTE) {
- dst = cycles(dst, BUS*2);
- } else {
- dst = cycles(dst, BUS);
- }
- dst = mov_ir(dst, inst->dst.params.immed, SCRATCH2, SZ_D);
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = cmp_ir(dst, 0, flags_reg, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- switch (inst->extra.size)
- {
- case OPSIZE_BYTE:
- dst = call(dst, opts->write_8);
- break;
- case OPSIZE_WORD:
- dst = call(dst, opts->write_16);
- break;
- case OPSIZE_LONG:
- dst = call(dst, opts->write_32_highfirst);
- break;
- }
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (move dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
- exit(1);
- }
-
- //add cycles for prefetch
- dst = cycles(dst, BUS);
- return dst;
-}
-
-code_ptr translate_m68k_movem(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int8_t bit,reg,sec_reg;
- uint8_t early_cycles;
- if(inst->src.addr_mode == MODE_REG) {
- //reg to mem
- early_cycles = 8;
- int8_t dir;
- switch (inst->dst.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- case MODE_AREG_PREDEC:
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- break;
- case MODE_AREG_DISPLACE:
- early_cycles += BUS;
- reg = SCRATCH2;
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- break;
- case MODE_AREG_INDEX_DISP8:
- early_cycles += 6;
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- }
- } else {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_D);
- }
- if (inst->dst.params.regs.displacement) {
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- }
- break;
- case MODE_PC_DISPLACE:
- early_cycles += BUS;
- dst = mov_ir(dst, inst->dst.params.regs.displacement + inst->address+2, SCRATCH2, SZ_D);
- break;
- case MODE_PC_INDEX_DISP8:
- early_cycles += 6;
- dst = mov_ir(dst, inst->address+2, SCRATCH2, SZ_D);
- sec_reg = (inst->dst.params.regs.sec >> 1) & 0x7;
- if (inst->dst.params.regs.sec & 1) {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- }
- } else {
- if (inst->dst.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_D);
- }
- if (inst->dst.params.regs.displacement) {
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- }
- break;
- case MODE_ABSOLUTE:
- early_cycles += 4;
- case MODE_ABSOLUTE_SHORT:
- early_cycles += 4;
- dst = mov_ir(dst, inst->dst.params.immed, SCRATCH2, SZ_D);
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (movem dst)\n", inst->address, disasm_buf, inst->dst.addr_mode);
- exit(1);
- }
- if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
- reg = 15;
- dir = -1;
- } else {
- reg = 0;
- dir = 1;
- }
- dst = cycles(dst, early_cycles);
- for(bit=0; reg < 16 && reg >= 0; reg += dir, bit++) {
- if (inst->src.params.immed & (1 << bit)) {
- if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
- dst = sub_ir(dst, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, SCRATCH2, SZ_D);
- }
- dst = push_r(dst, SCRATCH2);
- if (reg > 7) {
- if (opts->aregs[reg-8] >= 0) {
- dst = mov_rr(dst, opts->aregs[reg-8], SCRATCH1, inst->extra.size);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), SCRATCH1, inst->extra.size);
- }
- } else {
- if (opts->dregs[reg] >= 0) {
- dst = mov_rr(dst, opts->dregs[reg], SCRATCH1, inst->extra.size);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), SCRATCH1, inst->extra.size);
- }
- }
- if (inst->extra.size == OPSIZE_LONG) {
- dst = call(dst, opts->write_32_lowfirst);
- } else {
- dst = call(dst, opts->write_16);
- }
- dst = pop_r(dst, SCRATCH2);
- if (inst->dst.addr_mode != MODE_AREG_PREDEC) {
- dst = add_ir(dst, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, SCRATCH2, SZ_D);
- }
- }
- }
- if (inst->dst.addr_mode == MODE_AREG_PREDEC) {
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, SCRATCH2, opts->aregs[inst->dst.params.regs.pri], SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- }
- } else {
- //mem to reg
- early_cycles = 4;
- switch (inst->src.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- case MODE_AREG_POSTINC:
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- break;
- case MODE_AREG_DISPLACE:
- early_cycles += BUS;
- reg = SCRATCH2;
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- break;
- case MODE_AREG_INDEX_DISP8:
- early_cycles += 6;
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- break;
- case MODE_PC_DISPLACE:
- early_cycles += BUS;
- dst = mov_ir(dst, inst->src.params.regs.displacement + inst->address+2, SCRATCH1, SZ_D);
- break;
- case MODE_PC_INDEX_DISP8:
- early_cycles += 6;
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- break;
- case MODE_ABSOLUTE:
- early_cycles += 4;
- case MODE_ABSOLUTE_SHORT:
- early_cycles += 4;
- dst = mov_ir(dst, inst->src.params.immed, SCRATCH1, SZ_D);
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (movem src)\n", inst->address, disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- dst = cycles(dst, early_cycles);
- for(reg = 0; reg < 16; reg ++) {
- if (inst->dst.params.immed & (1 << reg)) {
- dst = push_r(dst, SCRATCH1);
- if (inst->extra.size == OPSIZE_LONG) {
- dst = call(dst, opts->read_32);
- } else {
- dst = call(dst, opts->read_16);
- }
- if (inst->extra.size == OPSIZE_WORD) {
- dst = movsx_rr(dst, SCRATCH1, SCRATCH1, SZ_W, SZ_D);
- }
- if (reg > 7) {
- if (opts->aregs[reg-8] >= 0) {
- dst = mov_rr(dst, SCRATCH1, opts->aregs[reg-8], SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * (reg-8), SZ_D);
- }
- } else {
- if (opts->dregs[reg] >= 0) {
- dst = mov_rr(dst, SCRATCH1, opts->dregs[reg], SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t) * (reg), SZ_D);
- }
- }
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, (inst->extra.size == OPSIZE_LONG) ? 4 : 2, SCRATCH1, SZ_D);
- }
- }
- if (inst->src.addr_mode == MODE_AREG_POSTINC) {
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, SCRATCH1, opts->aregs[inst->src.params.regs.pri], SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->src)), SZ_D);
- }
- }
- }
- //prefetch
- dst = cycles(dst, 4);
- return dst;
-}
-
-code_ptr translate_m68k_clr(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- dst = set_flag(dst, 0, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag(dst, 1, FLAG_Z, opts);
- int8_t reg = native_reg(&(inst->dst), opts);
- if (reg >= 0) {
- dst = cycles(dst, (inst->extra.size == OPSIZE_LONG ? 6 : 4));
- return xor_rr(dst, reg, reg, inst->extra.size);
- }
- x86_ea dst_op;
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 1);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = xor_rr(dst, dst_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = mov_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
- }
- dst = m68k_save_result(inst, dst, opts);
- return dst;
-}
-
-code_ptr translate_m68k_ext(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- x86_ea dst_op;
- uint8_t dst_size = inst->extra.size;
- inst->extra.size--;
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 0);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = movsx_rr(dst, dst_op.base, dst_op.base, inst->extra.size, dst_size);
- dst = cmp_ir(dst, 0, dst_op.base, dst_size);
- } else {
- dst = movsx_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH1, inst->extra.size, dst_size);
- dst = cmp_ir(dst, 0, SCRATCH1, dst_size);
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, dst_size);
- }
- inst->extra.size = dst_size;
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- //M68K EXT only operates on registers so no need for a call to save result here
- return dst;
-}
-
-code_ptr translate_m68k_lea(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int8_t dst_reg = native_reg(&(inst->dst), opts), sec_reg;
- switch(inst->src.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- dst = cycles(dst, BUS);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- if (dst_reg >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
- } else {
- dst = mov_rrdisp8(dst, opts->aregs[inst->src.params.regs.pri], CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
- }
- } else {
- if (dst_reg >= 0) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, dst_reg, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
- }
- }
- break;
- case MODE_AREG_DISPLACE:
- dst = cycles(dst, 8);
- if (dst_reg >= 0) {
- if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], dst_reg, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), dst_reg, SZ_D);
- }
- }
- dst = add_ir(dst, inst->src.params.regs.displacement, dst_reg, SZ_D);
- } else {
- if (inst->src.params.regs.pri != inst->dst.params.regs.pri) {
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rrdisp8(dst, opts->aregs[inst->src.params.regs.pri], CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- }
- dst = add_irdisp8(dst, inst->src.params.regs.displacement, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- break;
- case MODE_AREG_INDEX_DISP8:
- dst = cycles(dst, 12);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH2, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH2, SZ_D);
- }
- if (dst_reg >= 0) {
- dst = mov_rr(dst, SCRATCH2, dst_reg, SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- break;
- case MODE_PC_DISPLACE:
- dst = cycles(dst, 8);
- if (dst_reg >= 0) {
- dst = mov_ir(dst, inst->src.params.regs.displacement + inst->address+2, dst_reg, SZ_D);
- } else {
- dst = mov_irdisp8(dst, inst->src.params.regs.displacement + inst->address+2, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->dst.params.regs.pri, SZ_D);
- }
- break;
- case MODE_PC_INDEX_DISP8:
- dst = cycles(dst, BUS*3);
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- if (dst_reg >= 0) {
- dst = mov_rr(dst, SCRATCH1, dst_reg, SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- dst = cycles(dst, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
- if (dst_reg >= 0) {
- dst = mov_ir(dst, inst->src.params.immed, dst_reg, SZ_D);
- } else {
- dst = mov_irdisp8(dst, inst->src.params.immed, CONTEXT, reg_offset(&(inst->dst)), SZ_D);
- }
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\naddress mode %d not implemented (lea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- return dst;
-}
-
-code_ptr translate_m68k_pea(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- uint8_t sec_reg;
- switch(inst->src.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- dst = cycles(dst, BUS);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, SCRATCH1, SZ_D);
- }
- break;
- case MODE_AREG_DISPLACE:
- dst = cycles(dst, 8);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- break;
- case MODE_AREG_INDEX_DISP8:
- dst = cycles(dst, 6);//TODO: Check to make sure this is correct
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- break;
- case MODE_PC_DISPLACE:
- dst = cycles(dst, 8);
- dst = mov_ir(dst, inst->src.params.regs.displacement + inst->address+2, SCRATCH1, SZ_D);
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- dst = cycles(dst, (inst->src.addr_mode == MODE_ABSOLUTE) ? BUS * 3 : BUS * 2);
- dst = mov_ir(dst, inst->src.params.immed, SCRATCH1, SZ_D);
- break;
- default:
- m68k_disasm(inst, disasm_buf);
-		printf("%X: %s\naddress mode %d not implemented (pea src)\n", inst->address, disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_lowfirst);
- return dst;
-}
-
-code_ptr translate_m68k_bsr(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int32_t disp = inst->src.params.immed;
- uint32_t after = inst->address + (inst->variant == VAR_BYTE ? 2 : 4);
- //TODO: Add cycles in the right place relative to pushing the return address on the stack
- dst = cycles(dst, 10);
- dst = mov_ir(dst, after, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- code_ptr dest_addr = get_native_address(opts->gen.native_code_map, (inst->address+2) + disp);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, (inst->address+2) + disp, dst + 1);
- //dummy address to be replaced later
- dest_addr = dst + 256;
- }
- dst = jmp(dst, (char *)dest_addr);
- return dst;
-}
-
-code_ptr translate_m68k_bcc(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- dst = cycles(dst, 10);//TODO: Adjust this for branch not taken case
- int32_t disp = inst->src.params.immed;
- uint32_t after = inst->address + 2;
- code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + disp);
- if (inst->extra.cond == COND_TRUE) {
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- } else {
- uint8_t cond = CC_NZ;
- switch (inst->extra.cond)
- {
- case COND_HIGH:
- cond = CC_Z;
- case COND_LOW_SAME:
- dst = flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_C, SCRATCH1, opts);
- break;
- case COND_CARRY_CLR:
- cond = CC_Z;
- case COND_CARRY_SET:
- dst = check_flag(dst, FLAG_C, opts);
- break;
- case COND_NOT_EQ:
- cond = CC_Z;
- case COND_EQ:
- dst = check_flag(dst, FLAG_Z, opts);
- break;
- case COND_OVERF_CLR:
- cond = CC_Z;
- case COND_OVERF_SET:
- dst = check_flag(dst, FLAG_V, opts);
- break;
- case COND_PLUS:
- cond = CC_Z;
- case COND_MINUS:
- dst = check_flag(dst, FLAG_N, opts);
- break;
- case COND_GREATER_EQ:
- cond = CC_Z;
- case COND_LESS:
- dst = cmp_flags(dst, FLAG_N, FLAG_V, opts);
- break;
- case COND_GREATER:
- cond = CC_Z;
- case COND_LESS_EQ:
- dst = flag_to_reg(dst, FLAG_V, SCRATCH1, opts);
- dst = xor_flag_to_reg(dst, FLAG_N, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- break;
- }
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, after + disp, dst + 2);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jcc(dst, cond, dest_addr);
- }
- return dst;
-}
-
-code_ptr translate_m68k_scc(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- uint8_t cond = inst->extra.cond;
- x86_ea dst_op;
- inst->extra.size = OPSIZE_BYTE;
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 1);
- if (cond == COND_TRUE || cond == COND_FALSE) {
- if ((inst->dst.addr_mode == MODE_REG || inst->dst.addr_mode == MODE_AREG) && inst->extra.cond == COND_TRUE) {
- dst = cycles(dst, 6);
- } else {
- dst = cycles(dst, BUS);
- }
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_ir(dst, cond == COND_TRUE ? 0xFF : 0, dst_op.base, SZ_B);
- } else {
- dst = mov_irdisp8(dst, cond == COND_TRUE ? 0xFF : 0, dst_op.base, dst_op.disp, SZ_B);
- }
- } else {
- uint8_t cc = CC_NZ;
- switch (cond)
- {
- case COND_HIGH:
- cc = CC_Z;
- case COND_LOW_SAME:
- dst = flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_C, SCRATCH1, opts);
- break;
- case COND_CARRY_CLR:
- cc = CC_Z;
- case COND_CARRY_SET:
- dst = check_flag(dst, FLAG_C, opts);
- break;
- case COND_NOT_EQ:
- cc = CC_Z;
- case COND_EQ:
- dst = check_flag(dst, FLAG_Z, opts);
- break;
- case COND_OVERF_CLR:
- cc = CC_Z;
- case COND_OVERF_SET:
- dst = check_flag(dst, FLAG_V, opts);
- break;
- case COND_PLUS:
- cc = CC_Z;
- case COND_MINUS:
- dst = check_flag(dst, FLAG_N, opts);
- break;
- case COND_GREATER_EQ:
- cc = CC_Z;
- case COND_LESS:
- dst = cmp_flags(dst, FLAG_N, FLAG_V, opts);
- break;
- case COND_GREATER:
- cc = CC_Z;
- case COND_LESS_EQ:
- dst = flag_to_reg(dst, FLAG_V, SCRATCH1, opts);
- dst = xor_flag_to_reg(dst, FLAG_N, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- break;
- }
- code_ptr true_off = dst + 1;
- dst = jcc(dst, cc, dst+2);
- dst = cycles(dst, BUS);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_ir(dst, 0, dst_op.base, SZ_B);
- } else {
- dst = mov_irdisp8(dst, 0, dst_op.base, dst_op.disp, SZ_B);
- }
- code_ptr end_off = dst+1;
- dst = jmp(dst, dst+2);
- *true_off = dst - (true_off+1);
- dst = cycles(dst, 6);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_ir(dst, 0xFF, dst_op.base, SZ_B);
- } else {
- dst = mov_irdisp8(dst, 0xFF, dst_op.base, dst_op.disp, SZ_B);
- }
- *end_off = dst - (end_off+1);
- }
- dst = m68k_save_result(inst, dst, opts);
- return dst;
-}
-
-code_ptr translate_m68k_jmp(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- code_ptr dest_addr;
- uint8_t sec_reg;
- uint32_t m68k_addr;
- switch(inst->src.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- dst = cycles(dst, BUS*2);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_AREG_INDEX_DISP8:
-		dst = cycles(dst, BUS*3);//TODO: Check that this is correct
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- //32-bit index register
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- //16-bit index register
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_PC_DISPLACE:
- dst = cycles(dst, 10);
- m68k_addr = inst->src.params.regs.displacement + inst->address + 2;
- if ((m68k_addr & 0xFFFFFF) < 0x400000) {
- dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- } else {
- dst = mov_ir(dst, m68k_addr, SCRATCH1, SZ_D);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- break;
- case MODE_PC_INDEX_DISP8:
-		dst = cycles(dst, BUS*3);//TODO: Check that this is correct
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- dst = cycles(dst, inst->src.addr_mode == MODE_ABSOLUTE ? 12 : 10);
- m68k_addr = inst->src.params.immed;
- if ((m68k_addr & 0xFFFFFF) < 0x400000) {
- dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- } else {
- dst = mov_ir(dst, m68k_addr, SCRATCH1, SZ_D);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%s\naddress mode %d not yet supported (jmp)\n", disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- return dst;
-}
-
-code_ptr translate_m68k_jsr(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- code_ptr dest_addr;
- uint8_t sec_reg;
- uint32_t after;
- uint32_t m68k_addr;
- switch(inst->src.addr_mode)
- {
- case MODE_AREG_INDIRECT:
- dst = cycles(dst, BUS*2);
- dst = mov_ir(dst, inst->address + 2, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_AREG_DISPLACE:
- dst = cycles(dst, BUS*2);
- dst = mov_ir(dst, inst->address + 4, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + 4 * inst->src.params.regs.pri, SCRATCH1, SZ_D);
- }
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_AREG_INDEX_DISP8:
-		dst = cycles(dst, BUS*3);//TODO: Check that this is correct
- dst = mov_ir(dst, inst->address + 4, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_PC_DISPLACE:
- //TODO: Add cycles in the right place relative to pushing the return address on the stack
- dst = cycles(dst, 10);
- dst = mov_ir(dst, inst->address + 4, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- m68k_addr = inst->src.params.regs.displacement + inst->address + 2;
- if ((m68k_addr & 0xFFFFFF) < 0x400000) {
- dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- } else {
- dst = mov_ir(dst, m68k_addr, SCRATCH1, SZ_D);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- break;
- case MODE_PC_INDEX_DISP8:
-		dst = cycles(dst, BUS*3);//TODO: Check that this is correct
- dst = mov_ir(dst, inst->address + 4, SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- sec_reg = (inst->src.params.regs.sec >> 1) & 0x7;
- if (inst->src.params.regs.sec & 1) {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->aregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = add_rr(dst, opts->dregs[sec_reg], SCRATCH1, SZ_D);
- } else {
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH1, SZ_D);
- }
- }
- } else {
- if (inst->src.params.regs.sec & 0x10) {
- if (opts->aregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->aregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- } else {
- if (opts->dregs[sec_reg] >= 0) {
- dst = movsx_rr(dst, opts->dregs[sec_reg], SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movsx_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t)*sec_reg, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = add_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case MODE_ABSOLUTE:
- case MODE_ABSOLUTE_SHORT:
- //TODO: Add cycles in the right place relative to pushing the return address on the stack
- dst = cycles(dst, inst->src.addr_mode == MODE_ABSOLUTE ? 12 : 10);
- dst = mov_ir(dst, inst->address + (inst->src.addr_mode == MODE_ABSOLUTE ? 6 : 4), SCRATCH1, SZ_D);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_highfirst);
- m68k_addr = inst->src.params.immed;
- if ((m68k_addr & 0xFFFFFF) < 0x400000) {
- dest_addr = get_native_address(opts->gen.native_code_map, m68k_addr);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, m68k_addr, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- } else {
- dst = mov_ir(dst, m68k_addr, SCRATCH1, SZ_D);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%s\naddress mode %d not yet supported (jsr)\n", disasm_buf, inst->src.addr_mode);
- exit(1);
- }
- return dst;
-}
-
-code_ptr translate_m68k_rts(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- //TODO: Add cycles
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = add_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = call(dst, opts->read_32);
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- return dst;
-}
-
-code_ptr translate_m68k_dbcc(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- //best case duration
- dst = cycles(dst, 10);
- code_ptr skip_loc = NULL;
-	//TODO: Check if COND_TRUE is technically valid here even though
- //it's basically a slow NOP
- if (inst->extra.cond != COND_FALSE) {
- uint8_t cond = CC_NZ;
- switch (inst->extra.cond)
- {
- case COND_HIGH:
- cond = CC_Z;
- case COND_LOW_SAME:
- dst = flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_C, SCRATCH1, opts);
- break;
- case COND_CARRY_CLR:
- cond = CC_Z;
- case COND_CARRY_SET:
- dst = check_flag(dst, FLAG_C, opts);
- break;
- case COND_NOT_EQ:
- cond = CC_Z;
- case COND_EQ:
- dst = check_flag(dst, FLAG_Z, opts);
- break;
- case COND_OVERF_CLR:
- cond = CC_Z;
- case COND_OVERF_SET:
- dst = check_flag(dst, FLAG_V, opts);
- break;
- case COND_PLUS:
- cond = CC_Z;
- case COND_MINUS:
- dst = check_flag(dst, FLAG_N, opts);
- break;
- case COND_GREATER_EQ:
- cond = CC_Z;
- case COND_LESS:
- dst = cmp_flags(dst, FLAG_N, FLAG_V, opts);
- break;
- case COND_GREATER:
- cond = CC_Z;
- case COND_LESS_EQ:
- dst = flag_to_reg(dst, FLAG_V, SCRATCH1, opts);
- dst = xor_flag_to_reg(dst, FLAG_N, SCRATCH1, opts);
- dst = or_flag_to_reg(dst, FLAG_Z, SCRATCH1, opts);
- break;
- }
- skip_loc = dst + 1;
- dst = jcc(dst, cond, dst + 2);
- }
- if (opts->dregs[inst->dst.params.regs.pri] >= 0) {
- dst = sub_ir(dst, 1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
- dst = cmp_ir(dst, -1, opts->dregs[inst->dst.params.regs.pri], SZ_W);
- } else {
- dst = sub_irdisp8(dst, 1, CONTEXT, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
- dst = cmp_irdisp8(dst, -1, CONTEXT, offsetof(m68k_context, dregs) + 4 * inst->dst.params.regs.pri, SZ_W);
- }
- code_ptr loop_end_loc = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- uint32_t after = inst->address + 2;
- code_ptr dest_addr = get_native_address(opts->gen.native_code_map, after + inst->src.params.immed);
- if (!dest_addr) {
- opts->gen.deferred = defer_address(opts->gen.deferred, after + inst->src.params.immed, dst + 1);
- //dummy address to be replaced later, make sure it generates a 4-byte displacement
- dest_addr = dst + 256;
- }
- dst = jmp(dst, dest_addr);
- *loop_end_loc = dst - (loop_end_loc+1);
- if (skip_loc) {
- dst = cycles(dst, 2);
- *skip_loc = dst - (skip_loc+1);
- dst = cycles(dst, 2);
- } else {
- dst = cycles(dst, 4);
- }
- return dst;
-}
-
-code_ptr translate_m68k_link(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int8_t reg = native_reg(&(inst->src), opts);
- //compensate for displacement word
- dst = cycles(dst, BUS);
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- if (reg >= 0) {
- dst = mov_rr(dst, reg, SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- dst = call(dst, opts->write_32_highfirst);
- if (reg >= 0) {
- dst = mov_rr(dst, opts->aregs[7], reg, SZ_D);
- } else {
- dst = mov_rrdisp8(dst, opts->aregs[7], CONTEXT, reg_offset(&(inst->src)), SZ_D);
- }
- dst = add_ir(dst, inst->dst.params.immed, opts->aregs[7], SZ_D);
- //prefetch
- dst = cycles(dst, BUS);
- return dst;
-}
-
-code_ptr translate_m68k_movep(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- int8_t reg;
- dst = cycles(dst, BUS*2);
- if (inst->src.addr_mode == MODE_REG) {
- if (opts->aregs[inst->dst.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->dst.params.regs.pri], SCRATCH2, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->dst)), SCRATCH2, SZ_D);
- }
- if (inst->dst.params.regs.displacement) {
- dst = add_ir(dst, inst->dst.params.regs.displacement, SCRATCH2, SZ_D);
- }
- reg = native_reg(&(inst->src), opts);
- if (inst->extra.size == OPSIZE_LONG) {
- if (reg >= 0) {
- dst = mov_rr(dst, reg, SCRATCH1, SZ_D);
- dst = shr_ir(dst, 24, SCRATCH1, SZ_D);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->write_8);
- dst = pop_r(dst, SCRATCH2);
- dst = mov_rr(dst, reg, SCRATCH1, SZ_D);
- dst = shr_ir(dst, 16, SCRATCH1, SZ_D);
-
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src))+3, SCRATCH1, SZ_B);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->write_8);
- dst = pop_r(dst, SCRATCH2);
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src))+2, SCRATCH1, SZ_B);
- }
- dst = add_ir(dst, 2, SCRATCH2, SZ_D);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->write_8);
- dst = pop_r(dst, SCRATCH2);
- dst = add_ir(dst, 2, SCRATCH2, SZ_D);
- }
- if (reg >= 0) {
- dst = mov_rr(dst, reg, SCRATCH1, SZ_W);
- dst = shr_ir(dst, 8, SCRATCH1, SZ_W);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->write_8);
- dst = pop_r(dst, SCRATCH2);
- dst = mov_rr(dst, reg, SCRATCH1, SZ_W);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src))+1, SCRATCH1, SZ_B);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->write_8);
- dst = pop_r(dst, SCRATCH2);
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_B);
- }
- dst = add_ir(dst, 2, SCRATCH2, SZ_D);
- dst = call(dst, opts->write_8);
- } else {
- if (opts->aregs[inst->src.params.regs.pri] >= 0) {
- dst = mov_rr(dst, opts->aregs[inst->src.params.regs.pri], SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, reg_offset(&(inst->src)), SCRATCH1, SZ_D);
- }
- if (inst->src.params.regs.displacement) {
- dst = add_ir(dst, inst->src.params.regs.displacement, SCRATCH1, SZ_D);
- }
- reg = native_reg(&(inst->dst), opts);
- if (inst->extra.size == OPSIZE_LONG) {
- if (reg >= 0) {
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_8);
- dst = shl_ir(dst, 24, SCRATCH1, SZ_D);
- dst = mov_rr(dst, SCRATCH1, reg, SZ_D);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_8);
- dst = shl_ir(dst, 16, SCRATCH1, SZ_D);
- dst = or_rr(dst, SCRATCH1, reg, SZ_D);
- } else {
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_8);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst))+3, SZ_B);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_8);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst))+2, SZ_B);
- }
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- }
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_8);
- if (reg >= 0) {
-
- dst = shl_ir(dst, 8, SCRATCH1, SZ_W);
- dst = mov_rr(dst, SCRATCH1, reg, SZ_W);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- dst = call(dst, opts->read_8);
- dst = mov_rr(dst, SCRATCH1, reg, SZ_B);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst))+1, SZ_B);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- dst = call(dst, opts->read_8);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, reg_offset(&(inst->dst)), SZ_B);
- }
- }
- return dst;
-}
-
-code_ptr translate_m68k_cmp(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- uint8_t size = inst->extra.size;
- x86_ea src_op, dst_op;
- dst = translate_m68k_src(inst, &src_op, dst, opts);
- if (inst->dst.addr_mode == MODE_AREG_POSTINC) {
- dst = push_r(dst, SCRATCH1);
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 0);
- dst = pop_r(dst, SCRATCH2);
- src_op.base = SCRATCH2;
- } else {
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 0);
- if (inst->dst.addr_mode == MODE_AREG && size == OPSIZE_WORD) {
- size = OPSIZE_LONG;
- }
- }
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = cmp_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = cmp_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = cmp_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, src_op.disp, dst_op.base, size);
- } else {
- dst = cmp_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- return dst;
-}
-
-typedef code_ptr (*shift_ir_t)(code_ptr out, uint8_t val, uint8_t dst, uint8_t size);
-typedef code_ptr (*shift_irdisp8_t)(code_ptr out, uint8_t val, uint8_t dst_base, int8_t disp, uint8_t size);
-typedef code_ptr (*shift_clr_t)(code_ptr out, uint8_t dst, uint8_t size);
-typedef code_ptr (*shift_clrdisp8_t)(code_ptr out, uint8_t dst_base, int8_t disp, uint8_t size);
-
-code_ptr translate_shift(code_ptr dst, m68kinst * inst, x86_ea *src_op, x86_ea * dst_op, x86_68k_options * opts, shift_ir_t shift_ir, shift_irdisp8_t shift_irdisp8, shift_clr_t shift_clr, shift_clrdisp8_t shift_clrdisp8, shift_ir_t special, shift_irdisp8_t special_disp8)
-{
- code_ptr end_off = NULL;
- code_ptr nz_off = NULL;
- code_ptr z_off = NULL;
- if (inst->src.addr_mode == MODE_UNUSED) {
- dst = cycles(dst, BUS);
- //Memory shift
- dst = shift_ir(dst, 1, dst_op->base, SZ_W);
- } else {
- dst = cycles(dst, inst->extra.size == OPSIZE_LONG ? 8 : 6);
- if (src_op->mode == MODE_IMMED) {
- if (src_op->disp != 1 && inst->op == M68K_ASL) {
- dst = set_flag(dst, 0, FLAG_V, opts);
- for (int i = 0; i < src_op->disp; i++) {
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = shift_ir(dst, 1, dst_op->base, inst->extra.size);
- } else {
- dst = shift_irdisp8(dst, 1, dst_op->base, dst_op->disp, inst->extra.size);
- }
- //dst = setcc_r(dst, CC_O, FLAG_V);
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_NO, dst+2);
- dst = set_flag(dst, 1, FLAG_V, opts);
- *after_flag_set = dst - (after_flag_set+1);
- }
- } else {
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = shift_ir(dst, src_op->disp, dst_op->base, inst->extra.size);
- } else {
- dst = shift_irdisp8(dst, src_op->disp, dst_op->base, dst_op->disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- }
- } else {
- if (src_op->base != RCX) {
- if (src_op->mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op->base, RCX, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, src_op->base, src_op->disp, RCX, SZ_B);
- }
-
- }
- dst = and_ir(dst, 63, RCX, SZ_D);
- nz_off = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- //Flag behavior for shift count of 0 is different for x86 than 68K
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, 0, dst_op->base, inst->extra.size);
- } else {
- dst = cmp_irdisp8(dst, 0, dst_op->base, dst_op->disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_C, opts);
- //For other instructions, this flag will be set below
- if (inst->op == M68K_ASL) {
- dst = set_flag(dst, 0, FLAG_V, opts);
- }
- z_off = dst+1;
- dst = jmp(dst, dst+2);
- *nz_off = dst - (nz_off + 1);
- //add 2 cycles for every bit shifted
- dst = add_rr(dst, RCX, CYCLES, SZ_D);
- dst = add_rr(dst, RCX, CYCLES, SZ_D);
- if (inst->op == M68K_ASL) {
- //ASL has Overflow flag behavior that depends on all of the bits shifted through the MSB
- //Easiest way to deal with this is to shift one bit at a time
- dst = set_flag(dst, 0, FLAG_V, opts);
- code_ptr loop_start = dst;
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = shift_ir(dst, 1, dst_op->base, inst->extra.size);
- } else {
- dst = shift_irdisp8(dst, 1, dst_op->base, dst_op->disp, inst->extra.size);
- }
- //dst = setcc_r(dst, CC_O, FLAG_V);
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_NO, dst+2);
- dst = set_flag(dst, 1, FLAG_V, opts);
- *after_flag_set = dst - (after_flag_set+1);
- dst = loop(dst, loop_start);
- } else {
- //x86 shifts modulo 32 for operand sizes less than 64-bits
- //but M68K shifts modulo 64, so we need to check for large shifts here
- dst = cmp_ir(dst, 32, RCX, SZ_B);
- code_ptr norm_shift_off = dst + 1;
- dst = jcc(dst, CC_L, dst+2);
- if (special) {
- code_ptr after_flag_set = NULL;
- if (inst->extra.size == OPSIZE_LONG) {
- code_ptr neq_32_off = dst + 1;
- dst = jcc(dst, CC_NZ, dst+2);
-
- //set the carry bit to the lsb
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = special(dst, 1, dst_op->base, SZ_D);
- } else {
- dst = special_disp8(dst, 1, dst_op->base, dst_op->disp, SZ_D);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- after_flag_set = dst+1;
- dst = jmp(dst, dst+2);
- *neq_32_off = dst - (neq_32_off+1);
- }
- dst = set_flag(dst, 0, FLAG_C, opts);
- if (after_flag_set) {
- *after_flag_set = dst - (after_flag_set+1);
- }
- dst = set_flag(dst, 1, FLAG_Z, opts);
- dst = set_flag(dst, 0, FLAG_N, opts);
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = xor_rr(dst, dst_op->base, dst_op->base, inst->extra.size);
- } else {
- dst = mov_irdisp8(dst, 0, dst_op->base, dst_op->disp, inst->extra.size);
- }
- } else {
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = shift_ir(dst, 31, dst_op->base, inst->extra.size);
- dst = shift_ir(dst, 1, dst_op->base, inst->extra.size);
- } else {
- dst = shift_irdisp8(dst, 31, dst_op->base, dst_op->disp, inst->extra.size);
- dst = shift_irdisp8(dst, 1, dst_op->base, dst_op->disp, inst->extra.size);
- }
-
- }
- end_off = dst+1;
- dst = jmp(dst, dst+2);
- *norm_shift_off = dst - (norm_shift_off+1);
- if (dst_op->mode == MODE_REG_DIRECT) {
- dst = shift_clr(dst, dst_op->base, inst->extra.size);
- } else {
- dst = shift_clrdisp8(dst, dst_op->base, dst_op->disp, inst->extra.size);
- }
- }
- }
-
- }
- if (!special && end_off) {
- *end_off = dst - (end_off + 1);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- if (special && end_off) {
- *end_off = dst - (end_off + 1);
- }
- //set X flag to same as C flag
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- if (z_off) {
- *z_off = dst - (z_off + 1);
- }
- if (inst->op != M68K_ASL) {
- dst = set_flag(dst, 0, FLAG_V, opts);
- }
- if (inst->src.addr_mode == MODE_UNUSED) {
- dst = m68k_save_result(inst, dst, opts);
- }
- return dst;
-}
-
-#define BIT_SUPERVISOR 5
-
-code_ptr translate_m68k(code_ptr dst, m68kinst * inst, x86_68k_options * opts)
-{
- code_ptr end_off, zero_off, norm_off;
- uint8_t dst_reg;
- dst = check_cycles_int(dst, inst->address, opts);
- if (inst->op == M68K_MOVE) {
- return translate_m68k_move(dst, inst, opts);
- } else if(inst->op == M68K_LEA) {
- return translate_m68k_lea(dst, inst, opts);
- } else if(inst->op == M68K_PEA) {
- return translate_m68k_pea(dst, inst, opts);
- } else if(inst->op == M68K_BSR) {
- return translate_m68k_bsr(dst, inst, opts);
- } else if(inst->op == M68K_BCC) {
- return translate_m68k_bcc(dst, inst, opts);
- } else if(inst->op == M68K_JMP) {
- return translate_m68k_jmp(dst, inst, opts);
- } else if(inst->op == M68K_JSR) {
- return translate_m68k_jsr(dst, inst, opts);
- } else if(inst->op == M68K_RTS) {
- return translate_m68k_rts(dst, inst, opts);
- } else if(inst->op == M68K_DBCC) {
- return translate_m68k_dbcc(dst, inst, opts);
- } else if(inst->op == M68K_CLR) {
- return translate_m68k_clr(dst, inst, opts);
- } else if(inst->op == M68K_MOVEM) {
- return translate_m68k_movem(dst, inst, opts);
- } else if(inst->op == M68K_LINK) {
- return translate_m68k_link(dst, inst, opts);
- } else if(inst->op == M68K_EXT) {
- return translate_m68k_ext(dst, inst, opts);
- } else if(inst->op == M68K_SCC) {
- return translate_m68k_scc(dst, inst, opts);
- } else if(inst->op == M68K_MOVEP) {
- return translate_m68k_movep(dst, inst, opts);
- } else if(inst->op == M68K_INVALID) {
- if (inst->src.params.immed == 0x7100) {
- return retn(dst);
- }
- dst = mov_ir(dst, inst->address, SCRATCH2, SZ_D);
-#ifdef X86_32
- dst = push_r(dst, CONTEXT);
- dst = push_r(dst, SCRATCH2);
-#endif
- return call(dst, (code_ptr)m68k_invalid);
- } else if(inst->op == M68K_CMP) {
- return translate_m68k_cmp(dst, inst, opts);
- }
- x86_ea src_op, dst_op;
- if (inst->src.addr_mode != MODE_UNUSED) {
- dst = translate_m68k_src(inst, &src_op, dst, opts);
- }
- if (inst->dst.addr_mode != MODE_UNUSED) {
- dst = translate_m68k_dst(inst, &dst_op, dst, opts, 0);
- }
- uint8_t size;
- switch(inst->op)
- {
- case M68K_ABCD:
- if (src_op.base != SCRATCH2) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH2, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_B);
- }
- }
- if (dst_op.base != SCRATCH1) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH1, SZ_B);
- }
- }
- dst = flag_to_carry(dst, FLAG_X, opts);
- dst = jcc(dst, CC_NC, dst+5);
- dst = add_ir(dst, 1, SCRATCH1, SZ_B);
- dst = call(dst, (code_ptr)bcd_add);
- dst = reg_to_flag(dst, CH, FLAG_C, opts);
- dst = reg_to_flag(dst, CH, FLAG_X, opts);
- dst = cmp_ir(dst, 0, SCRATCH1, SZ_B);
- dst = jcc(dst, CC_Z, dst+4);
- dst = set_flag(dst, 0, FLAG_Z, opts);
- if (dst_op.base != SCRATCH1) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_B);
- }
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_ADD:
- dst = cycles(dst, BUS);
- size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = add_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = add_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = add_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = add_ir(dst, src_op.disp, dst_op.base, size);
- } else {
- dst = add_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, size);
- }
- }
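-		//ADDA (address register destination) does not modify the CCR on the 68K,
-		//so flag updates are skipped entirely for MODE_AREG destinations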
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_ADDX: {
- dst = cycles(dst, BUS);
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = adc_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = adc_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = adc_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = adc_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = adc_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
-
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = set_flag(dst, 0, FLAG_Z, opts);
- *after_flag_set = dst - (after_flag_set+1);
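-		//after_flag_set points at the rel8 displacement byte of the 2-byte jcc emitted
-		//above with a dummy target; patching that byte once the fall-through code has
-		//been emitted is the forward-branch idiom used throughout this translator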
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- }
- case M68K_AND:
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = and_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = and_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = and_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = and_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = and_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_ANDI_CCR:
- case M68K_ANDI_SR:
- dst = cycles(dst, 20);
- //TODO: If ANDI to SR, trap if not in supervisor mode
- if (!(inst->src.params.immed & 0x1)) {
- dst = set_flag(dst, 0, FLAG_C, opts);
- }
- if (!(inst->src.params.immed & 0x2)) {
- dst = set_flag(dst, 0, FLAG_V, opts);
- }
- if (!(inst->src.params.immed & 0x4)) {
- dst = set_flag(dst, 0, FLAG_Z, opts);
- }
- if (!(inst->src.params.immed & 0x8)) {
- dst = set_flag(dst, 0, FLAG_N, opts);
- }
- if (!(inst->src.params.immed & 0x10)) {
- dst = set_flag(dst, 0, FLAG_X, opts);
- }
- if (inst->op == M68K_ANDI_SR) {
- dst = and_irdisp8(dst, inst->src.params.immed >> 8, CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
- //leave supervisor mode
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_B);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_B);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_B);
- }
- if (inst->src.params.immed & 0x700) {
- dst = call(dst, opts->do_sync);
- }
- }
- break;
- case M68K_ASL:
- case M68K_LSL:
- dst = translate_shift(dst, inst, &src_op, &dst_op, opts, shl_ir, shl_irdisp8, shl_clr, shl_clrdisp8, shr_ir, shr_irdisp8);
- break;
- case M68K_ASR:
- dst = translate_shift(dst, inst, &src_op, &dst_op, opts, sar_ir, sar_irdisp8, sar_clr, sar_clrdisp8, NULL, NULL);
- break;
- case M68K_LSR:
- dst = translate_shift(dst, inst, &src_op, &dst_op, opts, shr_ir, shr_irdisp8, shr_clr, shr_clrdisp8, shl_ir, shl_irdisp8);
- break;
- case M68K_BCHG:
- case M68K_BCLR:
- case M68K_BSET:
- case M68K_BTST:
- dst = cycles(dst, inst->extra.size == OPSIZE_BYTE ? 4 : (
- inst->op == M68K_BTST ? 6 : (inst->op == M68K_BCLR ? 10 : 8))
- );
- if (src_op.mode == MODE_IMMED) {
- if (inst->extra.size == OPSIZE_BYTE) {
- src_op.disp &= 0x7;
- }
- if (inst->op == M68K_BTST) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = bt_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = bt_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (inst->op == M68K_BSET) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = bts_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = bts_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (inst->op == M68K_BCLR) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = btr_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = btr_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = btc_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = btc_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- } else {
- if (src_op.mode == MODE_REG_DISPLACE8 || (inst->dst.addr_mode != MODE_REG && src_op.base != SCRATCH1 && src_op.base != SCRATCH2)) {
- if (dst_op.base == SCRATCH1) {
- dst = push_r(dst, SCRATCH2);
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH2, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_B);
- }
- src_op.base = SCRATCH2;
- } else {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_B);
- }
- src_op.base = SCRATCH1;
- }
- }
- uint8_t size = inst->extra.size;
- if (dst_op.mode == MODE_REG_DISPLACE8) {
- if (src_op.base != SCRATCH1 && src_op.base != SCRATCH2) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_D);
- src_op.mode = MODE_REG_DIRECT;
- }
- src_op.base = SCRATCH1;
- }
- //b### with register destination is modulo 32
- //x86 with a memory destination isn't modulo anything
- //so use an and here to force the value to be modulo 32
- dst = and_ir(dst, 31, SCRATCH1, SZ_D);
- } else if(inst->dst.addr_mode != MODE_REG) {
- //b### with memory destination is modulo 8
- //x86-64 doesn't support 8-bit bit operations
- //so we fake it by forcing the bit number to be modulo 8
- dst = and_ir(dst, 7, src_op.base, SZ_D);
- size = SZ_D;
- }
- if (inst->op == M68K_BTST) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = bt_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = bt_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else if (inst->op == M68K_BSET) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = bts_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = bts_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else if (inst->op == M68K_BCLR) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = btr_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = btr_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = btc_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = btc_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- }
- if (src_op.base == SCRATCH2) {
- dst = pop_r(dst, SCRATCH2);
- }
- }
- //x86 sets the carry flag to the value of the bit tested
- //68K sets the zero flag to the complement of the bit tested
- dst = set_flag_cond(dst, CC_NC, FLAG_Z, opts);
- if (inst->op != M68K_BTST) {
- dst = m68k_save_result(inst, dst, opts);
- }
- break;
- case M68K_CHK:
- {
- dst = cycles(dst, 6);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- } else {
- dst = cmp_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
- }
- uint32_t isize;
- switch(inst->src.addr_mode)
- {
- case MODE_AREG_DISPLACE:
- case MODE_AREG_INDEX_DISP8:
- case MODE_ABSOLUTE_SHORT:
- case MODE_PC_INDEX_DISP8:
- case MODE_PC_DISPLACE:
- case MODE_IMMEDIATE:
- isize = 4;
- break;
- case MODE_ABSOLUTE:
- isize = 6;
- break;
- default:
- isize = 2;
- }
- code_ptr passed = dst+1;
- dst = jcc(dst, CC_GE, dst+2);
- dst = set_flag(dst, 1, FLAG_N, opts);
- dst = mov_ir(dst, VECTOR_CHK, SCRATCH2, SZ_D);
- dst = mov_ir(dst, inst->address+isize, SCRATCH1, SZ_D);
- dst = jmp(dst, opts->trap);
- *passed = dst - (passed+1);
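-		//the trap above fires when the tested value is negative (N forced set);
-		//the compare below checks it against the upper bound operand and traps with
-		//N cleared when it is greater, matching the 68K CHK exception semantics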
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = cmp_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else if(src_op.mode == MODE_REG_DISPLACE8) {
- dst = cmp_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = cmp_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- }
- } else if(dst_op.mode == MODE_REG_DISPLACE8) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = cmp_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = cmp_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- passed = dst+1;
- dst = jcc(dst, CC_LE, dst+2);
- dst = set_flag(dst, 0, FLAG_N, opts);
- dst = mov_ir(dst, VECTOR_CHK, SCRATCH2, SZ_D);
- dst = mov_ir(dst, inst->address+isize, SCRATCH1, SZ_D);
- dst = jmp(dst, opts->trap);
- *passed = dst - (passed+1);
- dst = cycles(dst, 4);
- break;
- }
- case M68K_DIVS:
- case M68K_DIVU:
- {
- //TODO: cycle exact division
- dst = cycles(dst, inst->op == M68K_DIVS ? 158 : 140);
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = push_r(dst, RDX);
- dst = push_r(dst, RAX);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, dst_op.base, RAX, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, dst_op.base, dst_op.disp, RAX, SZ_D);
- }
- if (src_op.mode == MODE_IMMED) {
- dst = mov_ir(dst, (src_op.disp & 0x8000) && inst->op == M68K_DIVS ? src_op.disp | 0xFFFF0000 : src_op.disp, SCRATCH2, SZ_D);
- } else if (src_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_DIVS) {
- dst = movsx_rr(dst, src_op.base, SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movzx_rr(dst, src_op.base, SCRATCH2, SZ_W, SZ_D);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- if (inst->op == M68K_DIVS) {
- dst = movsx_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movzx_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = cmp_ir(dst, 0, SCRATCH2, SZ_D);
- code_ptr not_zero = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = pop_r(dst, RAX);
- dst = pop_r(dst, RDX);
- dst = mov_ir(dst, VECTOR_INT_DIV_ZERO, SCRATCH2, SZ_D);
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- dst = jmp(dst, opts->trap);
- *not_zero = dst - (not_zero+1);
- if (inst->op == M68K_DIVS) {
- dst = cdq(dst);
- } else {
- dst = xor_rr(dst, RDX, RDX, SZ_D);
- }
- if (inst->op == M68K_DIVS) {
- dst = idiv_r(dst, SCRATCH2, SZ_D);
- } else {
- dst = div_r(dst, SCRATCH2, SZ_D);
- }
- code_ptr skip_sec_check;
- if (inst->op == M68K_DIVS) {
- dst = cmp_ir(dst, 0x8000, RAX, SZ_D);
- skip_sec_check = dst + 1;
- dst = jcc(dst, CC_GE, dst+2);
- dst = cmp_ir(dst, -0x8000, RAX, SZ_D);
- norm_off = dst+1;
- dst = jcc(dst, CC_L, dst+2);
- } else {
- dst = cmp_ir(dst, 0x10000, RAX, SZ_D);
- norm_off = dst+1;
- dst = jcc(dst, CC_NC, dst+2);
- }
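-		//quotients that do not fit in 16 bits (>= 0x10000 for DIVU, outside the signed
-		//16-bit range for DIVS) branch past the result store below to the overflow path,
-		//which sets V and leaves the destination register unmodified, as the 68K does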
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, RDX, dst_op.base, SZ_W);
- dst = shl_ir(dst, 16, dst_op.base, SZ_D);
- dst = mov_rr(dst, RAX, dst_op.base, SZ_W);
- } else {
- dst = mov_rrdisp8(dst, RDX, dst_op.base, dst_op.disp, SZ_W);
- dst = shl_irdisp8(dst, 16, dst_op.base, dst_op.disp, SZ_D);
- dst = mov_rrdisp8(dst, RAX, dst_op.base, dst_op.disp, SZ_W);
- }
- dst = cmp_ir(dst, 0, RAX, SZ_W);
- dst = pop_r(dst, RAX);
- dst = pop_r(dst, RDX);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- end_off = dst+1;
- dst = jmp(dst, dst+2);
- *norm_off = dst - (norm_off + 1);
- if (inst->op == M68K_DIVS) {
- *skip_sec_check = dst - (skip_sec_check+1);
- }
- dst = pop_r(dst, RAX);
- dst = pop_r(dst, RDX);
- dst = set_flag(dst, 1, FLAG_V, opts);
- *end_off = dst - (end_off + 1);
- break;
- }
- case M68K_EOR:
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = xor_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = xor_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = xor_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = xor_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = xor_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_EORI_CCR:
- case M68K_EORI_SR:
- dst = cycles(dst, 20);
-		//TODO: If EORI to SR, trap if not in supervisor mode
- if (inst->src.params.immed & 0x1) {
- dst = xor_flag(dst, 1, FLAG_C, opts);
- }
- if (inst->src.params.immed & 0x2) {
- dst = xor_flag(dst, 1, FLAG_V, opts);
- }
- if (inst->src.params.immed & 0x4) {
- dst = xor_flag(dst, 1, FLAG_Z, opts);
- }
- if (inst->src.params.immed & 0x8) {
- dst = xor_flag(dst, 1, FLAG_N, opts);
- }
- if (inst->src.params.immed & 0x10) {
- dst = xor_flag(dst, 1, FLAG_X, opts);
- }
-		if (inst->op == M68K_EORI_SR) {
- dst = xor_irdisp8(dst, inst->src.params.immed >> 8, CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (inst->src.params.immed & 0x700) {
- dst = call(dst, opts->do_sync);
- }
- }
- break;
- case M68K_EXG:
- dst = cycles(dst, 6);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, dst_op.base, SCRATCH2, SZ_D);
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, dst_op.base, SZ_D);
- dst = mov_rr(dst, SCRATCH2, src_op.base, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH2, src_op.base, src_op.disp, SZ_D);
- }
- } else {
- dst = mov_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH2, SZ_D);
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, SZ_D);
- dst = mov_rr(dst, SCRATCH2, src_op.base, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH2, src_op.base, src_op.disp, SZ_D);
- }
- }
- break;
- case M68K_ILLEGAL:
- dst = call(dst, opts->gen.save_context);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
-#else
- dst = push_r(dst, CONTEXT);
-#endif
- dst = call(dst, (code_ptr)print_regs_exit);
- break;
- case M68K_MOVE_FROM_SR:
- //TODO: Trap if not in system mode
- dst = call(dst, opts->get_sr);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_W);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_W);
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_MOVE_CCR:
- case M68K_MOVE_SR:
- //TODO: Privilege check for MOVE to SR
- if (src_op.mode == MODE_IMMED) {
- dst = set_flag(dst, src_op.disp & 0x1, FLAG_C, opts);
- dst = set_flag(dst, (src_op.disp >> 1) & 0x1, FLAG_V, opts);
- dst = set_flag(dst, (src_op.disp >> 2) & 0x1, FLAG_Z, opts);
- dst = set_flag(dst, (src_op.disp >> 3) & 0x1, FLAG_N, opts);
- dst = set_flag(dst, (src_op.disp >> 4) & 0x1, FLAG_X, opts);
- if (inst->op == M68K_MOVE_SR) {
- dst = mov_irdisp8(dst, (src_op.disp >> 8), CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
- //leave supervisor mode
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- }
- dst = call(dst, opts->do_sync);
- }
- dst = cycles(dst, 12);
- } else {
- if (src_op.base != SCRATCH1) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_W);
- }
- }
- dst = call(dst, inst->op == M68K_MOVE_SR ? opts->set_sr : opts->set_ccr);
- dst = cycles(dst, 12);
-
- }
- break;
- case M68K_MOVE_USP:
- dst = cycles(dst, BUS);
- //TODO: Trap if not in supervisor mode
- //dst = bt_irdisp8(dst, BIT_SUPERVISOR, CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (inst->src.addr_mode == MODE_UNUSED) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, dst_op.base, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_D);
- }
- } else {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rrdisp8(dst, src_op.base, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- }
- }
- break;
- //case M68K_MOVEP:
- case M68K_MULS:
- case M68K_MULU:
- dst = cycles(dst, 70); //TODO: Calculate the actual value based on the value of the <ea> parameter
- if (src_op.mode == MODE_IMMED) {
- dst = mov_ir(dst, inst->op == M68K_MULU ? (src_op.disp & 0xFFFF) : ((src_op.disp & 0x8000) ? src_op.disp | 0xFFFF0000 : src_op.disp), SCRATCH1, SZ_D);
- } else if (src_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_MULS) {
- dst = movsx_rr(dst, src_op.base, SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movzx_rr(dst, src_op.base, SCRATCH1, SZ_W, SZ_D);
- }
- } else {
- if (inst->op == M68K_MULS) {
- dst = movsx_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_W, SZ_D);
- } else {
- dst = movzx_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_W, SZ_D);
- }
- }
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst_reg = dst_op.base;
- if (inst->op == M68K_MULS) {
- dst = movsx_rr(dst, dst_reg, dst_reg, SZ_W, SZ_D);
- } else {
- dst = movzx_rr(dst, dst_reg, dst_reg, SZ_W, SZ_D);
- }
- } else {
- dst_reg = SCRATCH2;
- if (inst->op == M68K_MULS) {
- dst = movsx_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH2, SZ_W, SZ_D);
- } else {
- dst = movzx_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH2, SZ_W, SZ_D);
- }
- }
- dst = imul_rr(dst, SCRATCH1, dst_reg, SZ_D);
- if (dst_op.mode == MODE_REG_DISPLACE8) {
- dst = mov_rrdisp8(dst, dst_reg, dst_op.base, dst_op.disp, SZ_D);
- }
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = cmp_ir(dst, 0, dst_reg, SZ_D);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- break;
- //case M68K_NBCD:
- case M68K_NEG:
- dst = cycles(dst, BUS);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = neg_r(dst, dst_op.base, inst->extra.size);
- } else {
- dst = neg_rdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_NEGX: {
- dst = cycles(dst, BUS);
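-		//NEGX computes 0 - dst - X: zero a scratch register, move X into the host
-		//carry flag, then subtract the destination from it with borrow (sbb)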
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (dst_op.base == SCRATCH1) {
- dst = push_r(dst, SCRATCH2);
- dst = xor_rr(dst, SCRATCH2, SCRATCH2, inst->extra.size);
- dst = flag_to_carry(dst, FLAG_X, opts);
- dst = sbb_rr(dst, dst_op.base, SCRATCH2, inst->extra.size);
- dst = mov_rr(dst, SCRATCH2, dst_op.base, inst->extra.size);
- dst = pop_r(dst, SCRATCH2);
- } else {
- dst = xor_rr(dst, SCRATCH1, SCRATCH1, inst->extra.size);
- dst = flag_to_carry(dst, FLAG_X, opts);
- dst = sbb_rr(dst, dst_op.base, SCRATCH1, inst->extra.size);
- dst = mov_rr(dst, SCRATCH1, dst_op.base, inst->extra.size);
- }
- } else {
- dst = xor_rr(dst, SCRATCH1, SCRATCH1, inst->extra.size);
- dst = flag_to_carry(dst, FLAG_X, opts);
- dst = sbb_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH1, inst->extra.size);
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = set_flag(dst, 0, FLAG_Z, opts);
- *after_flag_set = dst - (after_flag_set+1);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- }
- case M68K_NOP:
- dst = cycles(dst, BUS);
- break;
- case M68K_NOT:
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = not_r(dst, dst_op.base, inst->extra.size);
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- } else {
- dst = not_rdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- dst = cmp_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
- }
-
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_OR:
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = or_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = or_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = or_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = or_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = or_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_ORI_CCR:
- case M68K_ORI_SR:
- dst = cycles(dst, 20);
-		//TODO: If ORI to SR, trap if not in supervisor mode
- if (inst->src.params.immed & 0x1) {
- dst = set_flag(dst, 1, FLAG_C, opts);
- }
- if (inst->src.params.immed & 0x2) {
- dst = set_flag(dst, 1, FLAG_V, opts);
- }
- if (inst->src.params.immed & 0x4) {
- dst = set_flag(dst, 1, FLAG_Z, opts);
- }
- if (inst->src.params.immed & 0x8) {
- dst = set_flag(dst, 1, FLAG_N, opts);
- }
- if (inst->src.params.immed & 0x10) {
- dst = set_flag(dst, 1, FLAG_X, opts);
- }
- if (inst->op == M68K_ORI_SR) {
- dst = or_irdisp8(dst, inst->src.params.immed >> 8, CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (inst->src.params.immed & 0x700) {
- dst = call(dst, opts->do_sync);
- }
- }
- break;
- case M68K_RESET:
- dst = call(dst, opts->gen.save_context);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
-#else
- dst = push_r(dst, CONTEXT);
-#endif
- dst = call(dst, (code_ptr)print_regs_exit);
- break;
- case M68K_ROL:
- case M68K_ROR:
- dst = set_flag(dst, 0, FLAG_V, opts);
- if (inst->src.addr_mode == MODE_UNUSED) {
- dst = cycles(dst, BUS);
- //Memory rotate
- if (inst->op == M68K_ROL) {
- dst = rol_ir(dst, 1, dst_op.base, inst->extra.size);
- } else {
- dst = ror_ir(dst, 1, dst_op.base, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = m68k_save_result(inst, dst, opts);
- } else {
- if (src_op.mode == MODE_IMMED) {
- dst = cycles(dst, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op.disp*2);
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROL) {
- dst = rol_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = ror_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROL) {
- dst = rol_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = ror_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- } else {
- if (src_op.mode == MODE_REG_DIRECT) {
- if (src_op.base != SCRATCH1) {
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_B);
- }
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_B);
- }
- dst = and_ir(dst, 63, SCRATCH1, SZ_D);
- zero_off = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
- dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
- dst = cmp_ir(dst, 32, SCRATCH1, SZ_B);
- norm_off = dst+1;
- dst = jcc(dst, CC_L, dst+2);
- dst = sub_ir(dst, 32, SCRATCH1, SZ_B);
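-				//x86 masks rotate counts to 5 bits, so an immediate rotate by 32 would degenerate
-				//to a rotate by 0; it is done as 31 + 1 instead, and the leftover count
-				//(count - 32) then goes through the CL-count rotate after the norm_off label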
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROL) {
- dst = rol_ir(dst, 31, dst_op.base, inst->extra.size);
- dst = rol_ir(dst, 1, dst_op.base, inst->extra.size);
- } else {
- dst = ror_ir(dst, 31, dst_op.base, inst->extra.size);
- dst = ror_ir(dst, 1, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROL) {
- dst = rol_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
- dst = rol_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = ror_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
- dst = ror_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- *norm_off = dst - (norm_off+1);
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROL) {
- dst = rol_clr(dst, dst_op.base, inst->extra.size);
- } else {
- dst = ror_clr(dst, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROL) {
- dst = rol_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = ror_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- end_off = dst + 1;
- dst = jmp(dst, dst+2);
- *zero_off = dst - (zero_off+1);
- dst = set_flag(dst, 0, FLAG_C, opts);
- *end_off = dst - (end_off+1);
- }
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- } else {
- dst = cmp_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- break;
- case M68K_ROXL:
- case M68K_ROXR:
- dst = set_flag(dst, 0, FLAG_V, opts);
- if (inst->src.addr_mode == MODE_UNUSED) {
- dst = cycles(dst, BUS);
- //Memory rotate
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (inst->op == M68K_ROXL) {
- dst = rcl_ir(dst, 1, dst_op.base, inst->extra.size);
- } else {
- dst = rcr_ir(dst, 1, dst_op.base, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- if (opts->flag_regs[FLAG_C] < 0) {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- }
- dst = m68k_save_result(inst, dst, opts);
- } else {
- if (src_op.mode == MODE_IMMED) {
- dst = cycles(dst, (inst->extra.size == OPSIZE_LONG ? 8 : 6) + src_op.disp*2);
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROXL) {
- dst = rcl_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = rcr_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROXL) {
- dst = rcl_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = rcr_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- } else {
- if (src_op.mode == MODE_REG_DIRECT) {
- if (src_op.base != SCRATCH1) {
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_B);
- }
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH1, SZ_B);
- }
- dst = and_ir(dst, 63, SCRATCH1, SZ_D);
- zero_off = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
- dst = add_rr(dst, SCRATCH1, CYCLES, SZ_D);
- dst = cmp_ir(dst, 32, SCRATCH1, SZ_B);
- norm_off = dst+1;
- dst = jcc(dst, CC_L, dst+2);
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROXL) {
- dst = rcl_ir(dst, 31, dst_op.base, inst->extra.size);
- dst = rcl_ir(dst, 1, dst_op.base, inst->extra.size);
- } else {
- dst = rcr_ir(dst, 31, dst_op.base, inst->extra.size);
- dst = rcr_ir(dst, 1, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROXL) {
- dst = rcl_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
- dst = rcl_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = rcr_irdisp8(dst, 31, dst_op.base, dst_op.disp, inst->extra.size);
- dst = rcr_irdisp8(dst, 1, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- dst = sub_ir(dst, 32, SCRATCH1, SZ_B);
- *norm_off = dst - (norm_off+1);
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (dst_op.mode == MODE_REG_DIRECT) {
- if (inst->op == M68K_ROXL) {
- dst = rcl_clr(dst, dst_op.base, inst->extra.size);
- } else {
- dst = rcr_clr(dst, dst_op.base, inst->extra.size);
- }
- } else {
- if (inst->op == M68K_ROXL) {
- dst = rcl_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- } else {
- dst = rcr_clrdisp8(dst, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- end_off = dst + 1;
- dst = jmp(dst, dst+2);
- *zero_off = dst - (zero_off+1);
- //Carry flag is set to X flag when count is 0, this is different from ROR/ROL
- dst = flag_to_flag(dst, FLAG_X, FLAG_C, opts);
- *end_off = dst - (end_off+1);
- }
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, 0, dst_op.base, inst->extra.size);
- } else {
- dst = cmp_irdisp8(dst, 0, dst_op.base, dst_op.disp, inst->extra.size);
- }
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- }
- break;
- case M68K_RTE:
- //TODO: Trap if not in system mode
- //Read saved SR
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = call(dst, opts->read_16);
- dst = add_ir(dst, 2, opts->aregs[7], SZ_D);
- dst = call(dst, opts->set_sr);
- //Read saved PC
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = call(dst, opts->read_32);
- dst = add_ir(dst, 4, opts->aregs[7], SZ_D);
- //Check if we've switched to user mode and swap stack pointers if needed
- dst = bt_irdisp8(dst, 5, CONTEXT, offsetof(m68k_context, status), SZ_B);
- end_off = dst+1;
- dst = jcc(dst, CC_C, dst+2);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- *end_off = dst - (end_off+1);
-	//Get native address, sync components, recalculate interrupt points and jump to returned address
- dst = call(dst, opts->native_addr_and_sync);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case M68K_RTR:
- //Read saved CCR
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = call(dst, opts->read_16);
- dst = add_ir(dst, 2, opts->aregs[7], SZ_D);
- dst = call(dst, opts->set_ccr);
- //Read saved PC
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = call(dst, opts->read_32);
- dst = add_ir(dst, 4, opts->aregs[7], SZ_D);
- //Get native address and jump to it
- dst = call(dst, opts->native_addr);
- dst = jmp_r(dst, SCRATCH1);
- break;
- case M68K_SBCD: {
- if (src_op.base != SCRATCH2) {
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, src_op.base, SCRATCH2, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, SCRATCH2, SZ_B);
- }
- }
- if (dst_op.base != SCRATCH1) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B);
- } else {
- dst = mov_rdisp8r(dst, dst_op.base, dst_op.disp, SCRATCH1, SZ_B);
- }
- }
- dst = flag_to_carry(dst, FLAG_X, opts);
- dst = jcc(dst, CC_NC, dst+5);
- dst = sub_ir(dst, 1, SCRATCH1, SZ_B);
- dst = call(dst, (code_ptr)bcd_sub);
- dst = reg_to_flag(dst, CH, FLAG_C, opts);
- dst = reg_to_flag(dst, CH, FLAG_X, opts);
- dst = cmp_ir(dst, 0, SCRATCH1, SZ_B);
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = set_flag(dst, 0, FLAG_Z, opts);
- *after_flag_set = dst - (after_flag_set+1);
- if (dst_op.base != SCRATCH1) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_B);
- }
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- }
- case M68K_STOP: {
- //TODO: Trap if not in system mode
- //manual says 4 cycles, but it has to be at least 8 since it's a 2-word instruction
- //possibly even 12 since that's how long MOVE to SR takes
- dst = cycles(dst, BUS*2);
- dst = set_flag(dst, src_op.disp & 0x1, FLAG_C, opts);
- dst = set_flag(dst, (src_op.disp >> 1) & 0x1, FLAG_V, opts);
- dst = set_flag(dst, (src_op.disp >> 2) & 0x1, FLAG_Z, opts);
- dst = set_flag(dst, (src_op.disp >> 3) & 0x1, FLAG_N, opts);
- dst = set_flag(dst, (src_op.disp >> 4) & 0x1, FLAG_X, opts);
- dst = mov_irdisp8(dst, (src_op.disp >> 8), CONTEXT, offsetof(m68k_context, status), SZ_B);
- if (!((inst->src.params.immed >> 8) & (1 << BIT_SUPERVISOR))) {
- //leave supervisor mode
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, opts->aregs[7], SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- }
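-		//spin until an interrupt is due: each iteration syncs the other components and
-		//advances the cycle counter, exiting once current_cycle reaches int_cycle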
- code_ptr loop_top = dst;
- dst = call(dst, opts->do_sync);
- dst = cmp_rr(dst, LIMIT, CYCLES, SZ_D);
- code_ptr normal_cycle_up = dst + 1;
- dst = jcc(dst, CC_A, dst+2);
- dst = cycles(dst, BUS);
- code_ptr after_cycle_up = dst + 1;
- dst = jmp(dst, dst+2);
- *normal_cycle_up = dst - (normal_cycle_up + 1);
- dst = mov_rr(dst, LIMIT, CYCLES, SZ_D);
- *after_cycle_up = dst - (after_cycle_up+1);
- dst = cmp_rdisp8r(dst, CONTEXT, offsetof(m68k_context, int_cycle), CYCLES, SZ_D);
- dst = jcc(dst, CC_C, loop_top);
- break;
- }
- case M68K_SUB:
- size = inst->dst.addr_mode == MODE_AREG ? OPSIZE_LONG : inst->extra.size;
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = sub_rr(dst, src_op.base, dst_op.base, size);
- } else {
- dst = sub_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = sub_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = sub_ir(dst, src_op.disp, dst_op.base, size);
- } else {
- dst = sub_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, size);
- }
- }
- if (inst->dst.addr_mode != MODE_AREG) {
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- } else {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- case M68K_SUBX: {
- dst = cycles(dst, BUS);
- dst = flag_to_carry(dst, FLAG_X, opts);
- if (src_op.mode == MODE_REG_DIRECT) {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = sbb_rr(dst, src_op.base, dst_op.base, inst->extra.size);
- } else {
- dst = sbb_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, inst->extra.size);
- }
- } else if (src_op.mode == MODE_REG_DISPLACE8) {
- dst = sbb_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = sbb_ir(dst, src_op.disp, dst_op.base, inst->extra.size);
- } else {
- dst = sbb_irdisp8(dst, src_op.disp, dst_op.base, dst_op.disp, inst->extra.size);
- }
- }
- dst = set_flag_cond(dst, CC_C, FLAG_C, opts);
- if (opts->flag_regs[FLAG_C] < 0) {
- dst = set_flag_cond(dst, CC_C, FLAG_X, opts);
- }
- code_ptr after_flag_set = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = set_flag(dst, 0, FLAG_Z, opts);
- *after_flag_set = dst - (after_flag_set+1);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag_cond(dst, CC_O, FLAG_V, opts);
- if (opts->flag_regs[FLAG_C] >= 0) {
- dst = flag_to_flag(dst, FLAG_C, FLAG_X, opts);
- }
- dst = m68k_save_result(inst, dst, opts);
- break;
- }
- case M68K_SWAP:
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = rol_ir(dst, 16, src_op.base, SZ_D);
- dst = cmp_ir(dst, 0, src_op.base, SZ_D);
- } else{
- dst = rol_irdisp8(dst, 16, src_op.base, src_op.disp, SZ_D);
- dst = cmp_irdisp8(dst, 0, src_op.base, src_op.disp, SZ_D);
- }
-
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- break;
- //case M68K_TAS:
- case M68K_TRAP:
- dst = mov_ir(dst, src_op.disp + VECTOR_TRAP_0, SCRATCH2, SZ_D);
- dst = mov_ir(dst, inst->address+2, SCRATCH1, SZ_D);
- dst = jmp(dst, opts->trap);
- break;
- //case M68K_TRAPV:
- case M68K_TST:
- dst = cycles(dst, BUS);
- if (src_op.mode == MODE_REG_DIRECT) {
- dst = cmp_ir(dst, 0, src_op.base, inst->extra.size);
-	} else { //M68000 doesn't support immediate operand for tst, so this must be MODE_REG_DISPLACE8
- dst = cmp_irdisp8(dst, 0, src_op.base, src_op.disp, inst->extra.size);
- }
- dst = set_flag(dst, 0, FLAG_C, opts);
- dst = set_flag_cond(dst, CC_Z, FLAG_Z, opts);
- dst = set_flag_cond(dst, CC_S, FLAG_N, opts);
- dst = set_flag(dst, 0, FLAG_V, opts);
- break;
- case M68K_UNLK:
- dst = cycles(dst, BUS);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, dst_op.base, opts->aregs[7], SZ_D);
- } else {
- dst = mov_rdisp8r(dst, dst_op.base, dst_op.disp, opts->aregs[7], SZ_D);
- }
- dst = mov_rr(dst, opts->aregs[7], SCRATCH1, SZ_D);
- dst = call(dst, opts->read_32);
- if (dst_op.mode == MODE_REG_DIRECT) {
- dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_D);
- } else {
- dst = mov_rrdisp8(dst, SCRATCH1, dst_op.base, dst_op.disp, SZ_D);
- }
- dst = add_ir(dst, 4, opts->aregs[7], SZ_D);
- break;
- default:
- m68k_disasm(inst, disasm_buf);
- printf("%X: %s\ninstruction %d not yet implemented\n", inst->address, disasm_buf, inst->op);
- exit(1);
- }
- return dst;
-}
-
-uint8_t m68k_is_terminal(m68kinst * inst)
-{
- return inst->op == M68K_RTS || inst->op == M68K_RTE || inst->op == M68K_RTR || inst->op == M68K_JMP
- || inst->op == M68K_TRAP || inst->op == M68K_ILLEGAL || inst->op == M68K_INVALID || inst->op == M68K_RESET
- || (inst->op == M68K_BCC && inst->extra.cond == COND_TRUE);
-}
-
-void m68k_handle_deferred(m68k_context * context)
-{
- x86_68k_options * opts = context->options;
- process_deferred(&opts->gen.deferred, context, (native_addr_func)get_native_from_context);
- if (opts->gen.deferred) {
- translate_m68k_stream(opts->gen.deferred->address, context);
- }
-}
-
-code_ptr translate_m68k_stream(uint32_t address, m68k_context * context)
-{
- m68kinst instbuf;
- x86_68k_options * opts = context->options;
- code_ptr dst = opts->gen.cur_code;
- code_ptr dst_end = opts->gen.code_end;
- address &= 0xFFFFFF;
- if(get_native_address(opts->gen.native_code_map, address)) {
- return dst;
- }
- char disbuf[1024];
- uint16_t *encoded, *next;
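-	//on the Genesis, addresses below 0x400000 are cartridge ROM (mem_pointers[0]) and
-	//addresses above 0xE00000 are 68K work RAM (mem_pointers[1]); the 0xFFFF mask
-	//reflects the 64KB RAM being mirrored throughout that range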
- if ((address & 0xFFFFFF) < 0x400000) {
- encoded = context->mem_pointers[0] + (address & 0xFFFFFF)/2;
- } else if ((address & 0xFFFFFF) > 0xE00000) {
- encoded = context->mem_pointers[1] + (address & 0xFFFF)/2;
- } else {
- printf("attempt to translate non-memory address: %X\n", address);
- exit(1);
- }
- do {
- if (opts->address_log) {
- fprintf(opts->address_log, "%X\n", address);
- }
- do {
- if (dst_end-dst < MAX_NATIVE_SIZE) {
- if (dst_end-dst < 5) {
- puts("out of code memory, not enough space for jmp to next chunk");
- exit(1);
- }
- size_t size = 1024*1024;
- opts->gen.cur_code = alloc_code(&size);
- opts->gen.code_end = opts->gen.cur_code + size;
- jmp(dst, opts->gen.cur_code);
- dst = opts->gen.cur_code;
- dst_end = opts->gen.code_end;
- }
- if (address >= 0x400000 && address < 0xE00000) {
- dst = xor_rr(dst, RDI, RDI, SZ_D);
-#ifdef X86_32
- dst = push_r(dst, RDI);
-#endif
- dst = call(dst, (code_ptr)exit);
- break;
- }
- code_ptr existing = get_native_address(opts->gen.native_code_map, address);
- if (existing) {
- dst = jmp(dst, existing);
- break;
- }
- next = m68k_decode(encoded, &instbuf, address);
- if (instbuf.op == M68K_INVALID) {
- instbuf.src.params.immed = *encoded;
- }
- uint16_t m68k_size = (next-encoded)*2;
- address += m68k_size;
- encoded = next;
- //m68k_disasm(&instbuf, disbuf);
- //printf("%X: %s\n", instbuf.address, disbuf);
- code_ptr after = translate_m68k(dst, &instbuf, opts);
- map_native_address(context, instbuf.address, dst, m68k_size, after-dst);
- dst = after;
- } while(!m68k_is_terminal(&instbuf));
- process_deferred(&opts->gen.deferred, context, (native_addr_func)get_native_from_context);
- if (opts->gen.deferred) {
- address = opts->gen.deferred->address;
- if ((address & 0xFFFFFF) < 0x400000) {
- encoded = context->mem_pointers[0] + (address & 0xFFFFFF)/2;
- } else if ((address & 0xFFFFFF) > 0xE00000) {
- encoded = context->mem_pointers[1] + (address & 0xFFFF)/2;
- } else {
- printf("attempt to translate non-memory address: %X\n", address);
- exit(1);
- }
- } else {
- encoded = NULL;
- }
- } while(encoded != NULL);
- opts->gen.cur_code = dst;
- return dst;
-}
-
-code_ptr get_native_address_trans(m68k_context * context, uint32_t address)
-{
- address &= 0xFFFFFF;
- code_ptr ret = get_native_address(context->native_code_map, address);
- if (!ret) {
- translate_m68k_stream(address, context);
- ret = get_native_address(context->native_code_map, address);
- }
- return ret;
-}
-
-void * m68k_retranslate_inst(uint32_t address, m68k_context * context)
-{
- x86_68k_options * opts = context->options;
- uint8_t orig_size = get_native_inst_size(opts, address);
- code_ptr orig_start = get_native_address(context->native_code_map, address);
- uint32_t orig = address;
- address &= 0xFFFF;
- code_ptr dst = opts->gen.cur_code;
- code_ptr dst_end = opts->gen.code_end;
- uint16_t *after, *inst = context->mem_pointers[1] + address/2;
- m68kinst instbuf;
- after = m68k_decode(inst, &instbuf, orig);
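-	//if the new translation fits in the space of the old one it is rewritten in place at
-	//orig_start, padding any leftover bytes with NOPs or a jmp to the next instruction's
-	//native code; otherwise a full MAX_NATIVE_SIZE block is emitted elsewhere and the old
-	//location is patched with a jmp to it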
- if (orig_size != MAX_NATIVE_SIZE) {
- if (dst_end - dst < 128) {
- size_t size = 1024*1024;
- dst = alloc_code(&size);
- opts->gen.code_end = dst_end = dst + size;
- opts->gen.cur_code = dst;
- }
- deferred_addr * orig_deferred = opts->gen.deferred;
- code_ptr native_end = translate_m68k(dst, &instbuf, opts);
- uint8_t is_terminal = m68k_is_terminal(&instbuf);
- if ((native_end - dst) <= orig_size) {
- code_ptr native_next;
- if (!is_terminal) {
- native_next = get_native_address(context->native_code_map, orig + (after-inst)*2);
- }
- if (is_terminal || (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5))) {
- remove_deferred_until(&opts->gen.deferred, orig_deferred);
- native_end = translate_m68k(orig_start, &instbuf, opts);
- if (!is_terminal) {
- if (native_next == orig_start + orig_size && (native_next-native_end) < 2) {
- while (native_end < orig_start + orig_size) {
- *(native_end++) = 0x90; //NOP
- }
- } else {
- jmp(native_end, native_next);
- }
- }
- m68k_handle_deferred(context);
- return orig_start;
- }
- }
-
- map_native_address(context, instbuf.address, dst, (after-inst)*2, MAX_NATIVE_SIZE);
- opts->gen.cur_code = dst+MAX_NATIVE_SIZE;
- jmp(orig_start, dst);
- if (!m68k_is_terminal(&instbuf)) {
- jmp(native_end, get_native_address_trans(context, orig + (after-inst)*2));
- }
- m68k_handle_deferred(context);
- return dst;
- } else {
- dst = translate_m68k(orig_start, &instbuf, opts);
- if (!m68k_is_terminal(&instbuf)) {
- dst = jmp(dst, get_native_address_trans(context, orig + (after-inst)*2));
- }
- m68k_handle_deferred(context);
- return orig_start;
- }
-}
-
-m68k_context * m68k_handle_code_write(uint32_t address, m68k_context * context)
-{
- uint32_t inst_start = get_instruction_start(context->native_code_map, address | 0xFF0000);
- if (inst_start) {
- code_ptr dst = get_native_address(context->native_code_map, inst_start);
- dst = mov_ir(dst, inst_start, SCRATCH2, SZ_D);
- x86_68k_options * options = context->options;
- if (!options->retrans_stub) {
- if (options->gen.code_end - options->gen.cur_code < 32) {
- size_t size = 1024*1024;
- options->gen.cur_code = alloc_code(&size);
- options->gen.code_end = options->gen.cur_code + size;
- }
- code_ptr rdst = options->retrans_stub = options->gen.cur_code;
- rdst = call(rdst, options->gen.save_context);
- rdst = push_r(rdst, CONTEXT);
-#ifdef X86_32
- rdst = push_r(rdst, CONTEXT);
- rdst = push_r(rdst, SCRATCH2);
-#endif
- rdst = call(rdst, (code_ptr)m68k_retranslate_inst);
-#ifdef X86_32
- rdst = add_ir(rdst, 8, RSP, SZ_D);
-#endif
- rdst = pop_r(rdst, CONTEXT);
- rdst = mov_rr(rdst, RAX, SCRATCH1, SZ_PTR);
- rdst = call(rdst, options->gen.load_context);
- rdst = jmp_r(rdst, SCRATCH1);
- options->gen.cur_code = rdst;
- }
- dst = jmp(dst, options->retrans_stub);
- }
- return context;
-}
-
-void insert_breakpoint(m68k_context * context, uint32_t address, code_ptr bp_handler)
-{
- static code_ptr bp_stub = NULL;
- code_ptr native = get_native_address_trans(context, address);
- code_ptr start_native = native;
- native = mov_ir(native, address, SCRATCH1, SZ_D);
- if (!bp_stub) {
- x86_68k_options * opts = context->options;
- code_ptr dst = opts->gen.cur_code;
- code_ptr dst_end = opts->gen.code_end;
- if (dst_end - dst < 128) {
- size_t size = 1024*1024;
- dst = alloc_code(&size);
- opts->gen.code_end = dst_end = dst + size;
- }
- bp_stub = dst;
- native = call(native, bp_stub);
-
- //Calculate length of prologue
- dst = check_cycles_int(dst, address, opts);
- int check_int_size = dst-bp_stub;
- dst = bp_stub;
-
- //Save context and call breakpoint handler
- dst = call(dst, opts->gen.save_context);
- dst = push_r(dst, SCRATCH1);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
- dst = mov_rr(dst, SCRATCH1, RSI, SZ_D);
-#else
- dst = push_r(dst, SCRATCH1);
- dst = push_r(dst, CONTEXT);
-#endif
- dst = call(dst, bp_handler);
-#ifdef X86_32
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- //Restore context
- dst = call(dst, opts->gen.load_context);
- dst = pop_r(dst, SCRATCH1);
- //do prologue stuff
- dst = cmp_rr(dst, CYCLES, LIMIT, SZ_D);
- code_ptr jmp_off = dst+1;
- dst = jcc(dst, CC_NC, dst + 7);
- dst = call(dst, opts->gen.handle_cycle_limit_int);
- *jmp_off = dst - (jmp_off+1);
- //jump back to body of translated instruction
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_PTR);
- dst = jmp_r(dst, SCRATCH1);
- opts->gen.cur_code = dst;
- } else {
- native = call(native, bp_stub);
- }
-}
-
-void remove_breakpoint(m68k_context * context, uint32_t address)
-{
- code_ptr native = get_native_address(context->native_code_map, address);
- check_cycles_int(native, address, context->options);
-}
-
-void start_68k_context(m68k_context * context, uint32_t address)
-{
- code_ptr addr = get_native_address_trans(context, address);
- x86_68k_options * options = context->options;
- options->start_context(addr, context);
-}
-
-void m68k_reset(m68k_context * context)
-{
- //TODO: Make this actually use the normal read functions
- context->aregs[7] = context->mem_pointers[0][0] << 16 | context->mem_pointers[0][1];
- uint32_t address = context->mem_pointers[0][2] << 16 | context->mem_pointers[0][3];
- start_68k_context(context, address);
-}
-
-code_ptr gen_mem_fun(cpu_options * opts, memmap_chunk * memmap, uint32_t num_chunks, ftype fun_type)
-{
- code_ptr dst = opts->cur_code;
- code_ptr start = dst;
- dst = check_cycles(dst, opts);
- dst = cycles(dst, BUS);
- dst = and_ir(dst, 0xFFFFFF, SCRATCH1, SZ_D);
- code_ptr lb_jcc = NULL, ub_jcc = NULL;
- uint8_t is_write = fun_type == WRITE_16 || fun_type == WRITE_8;
- uint8_t adr_reg = is_write ? SCRATCH2 : SCRATCH1;
- uint16_t access_flag = is_write ? MMAP_WRITE : MMAP_READ;
- uint8_t size = (fun_type == READ_16 || fun_type == WRITE_16) ? SZ_W : SZ_B;
- for (uint32_t chunk = 0; chunk < num_chunks; chunk++)
- {
- if (memmap[chunk].start > 0) {
- dst = cmp_ir(dst, memmap[chunk].start, adr_reg, SZ_D);
- lb_jcc = dst + 1;
- dst = jcc(dst, CC_C, dst+2);
- }
- if (memmap[chunk].end < 0x1000000) {
- dst = cmp_ir(dst, memmap[chunk].end, adr_reg, SZ_D);
- ub_jcc = dst + 1;
- dst = jcc(dst, CC_NC, dst+2);
- }
-
- if (memmap[chunk].mask != 0xFFFFFF) {
- dst = and_ir(dst, memmap[chunk].mask, adr_reg, SZ_D);
- }
- void * cfun;
- switch (fun_type)
- {
- case READ_16:
- cfun = memmap[chunk].read_16;
- break;
- case READ_8:
- cfun = memmap[chunk].read_8;
- break;
- case WRITE_16:
- cfun = memmap[chunk].write_16;
- break;
- case WRITE_8:
- cfun = memmap[chunk].write_8;
- break;
- default:
- cfun = NULL;
- }
- if(memmap[chunk].buffer && memmap[chunk].flags & access_flag) {
- if (memmap[chunk].flags & MMAP_PTR_IDX) {
- if (memmap[chunk].flags & MMAP_FUNC_NULL) {
- dst = cmp_irdisp8(dst, 0, CONTEXT, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, SZ_PTR);
- code_ptr not_null = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, opts->save_context);
-#ifdef X86_64
- if (is_write) {
- if (SCRATCH2 != RDI) {
- dst = mov_rr(dst, SCRATCH2, RDI, SZ_D);
- }
- dst = mov_rr(dst, SCRATCH1, RDX, size);
- } else {
- dst = push_r(dst, CONTEXT);
- dst = mov_rr(dst, SCRATCH1, RDI, SZ_D);
- }
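-				//the SysV AMD64 ABI requires RSP to be 16-byte aligned at the call site; if bit 3
-				//of RSP is set, the stack is temporarily adjusted by 8 around the call into C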
- dst = test_ir(dst, 8, RSP, SZ_D);
- code_ptr adjust_rsp = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, cfun);
- code_ptr no_adjust = dst+1;
- dst = jmp(dst, dst+2);
- *adjust_rsp = dst - (adjust_rsp + 1);
- dst = sub_ir(dst, 8, RSP, SZ_PTR);
- dst = call(dst, cfun);
- dst = add_ir(dst, 8, RSP, SZ_PTR);
- *no_adjust = dst - (no_adjust + 1);
-#else
- if (is_write) {
- dst = push_r(dst, SCRATCH1);
- } else {
- dst = push_r(dst, CONTEXT);//save CONTEXT for later
- }
- dst = push_r(dst, CONTEXT);
- dst = push_r(dst, is_write ? SCRATCH2 : SCRATCH1);
- dst = call(dst, cfun);
- dst = add_ir(dst, is_write ? 12 : 8, RSP, SZ_D);
-#endif
- if (is_write) {
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- } else {
- dst = pop_r(dst, CONTEXT);
- dst = mov_rr(dst, RAX, SCRATCH1, size);
- }
- dst = jmp(dst, opts->load_context);
-
- *not_null = dst - (not_null + 1);
- }
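-				//byte accesses flip the low address bit: ROM/RAM is kept as 16-bit words in host
-				//byte order, so on a little-endian host XORing bit 0 of the address selects the
-				//correct byte for the 68K's big-endian layout within each word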
- if (size == SZ_B) {
- dst = xor_ir(dst, 1, adr_reg, SZ_D);
- }
- dst = add_rdisp8r(dst, CONTEXT, offsetof(m68k_context, mem_pointers) + sizeof(void*) * memmap[chunk].ptr_index, adr_reg, SZ_PTR);
- if (is_write) {
- dst = mov_rrind(dst, SCRATCH1, SCRATCH2, size);
-
- } else {
- dst = mov_rindr(dst, SCRATCH1, SCRATCH1, size);
- }
- } else {
- uint8_t tmp_size = size;
- if (size == SZ_B) {
- if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
- dst = bt_ir(dst, 0, adr_reg, SZ_D);
- code_ptr good_addr = dst + 1;
- dst = jcc(dst, (memmap[chunk].flags & MMAP_ONLY_ODD) ? CC_C : CC_NC, dst+2);
- if (!is_write) {
- dst = mov_ir(dst, 0xFF, SCRATCH1, SZ_B);
- }
- dst = retn(dst);
- *good_addr = dst - (good_addr + 1);
- dst = shr_ir(dst, 1, adr_reg, SZ_D);
- } else {
- dst = xor_ir(dst, 1, adr_reg, SZ_D);
- }
- } else if ((memmap[chunk].flags & MMAP_ONLY_ODD) || (memmap[chunk].flags & MMAP_ONLY_EVEN)) {
- tmp_size = SZ_B;
- dst = shr_ir(dst, 1, adr_reg, SZ_D);
- if ((memmap[chunk].flags & MMAP_ONLY_EVEN) && is_write) {
- dst = shr_ir(dst, 8, SCRATCH1, SZ_W);
- }
- }
- if ((intptr_t)memmap[chunk].buffer <= 0x7FFFFFFF && (intptr_t)memmap[chunk].buffer >= -2147483648) {
- if (is_write) {
- dst = mov_rrdisp32(dst, SCRATCH1, SCRATCH2, (intptr_t)memmap[chunk].buffer, tmp_size);
- } else {
- dst = mov_rdisp32r(dst, SCRATCH1, (intptr_t)memmap[chunk].buffer, SCRATCH1, tmp_size);
- }
- } else {
- if (is_write) {
- if (memmap[chunk].flags & MMAP_CODE) {
- dst = push_r(dst, SCRATCH2);
- }
- dst = push_r(dst, SCRATCH1);
- dst = mov_ir(dst, (intptr_t)memmap[chunk].buffer, SCRATCH1, SZ_PTR);
- dst = add_rr(dst, SCRATCH1, SCRATCH2, SZ_PTR);
- dst = pop_r(dst, SCRATCH1);
- dst = mov_rrind(dst, SCRATCH1, SCRATCH2, tmp_size);
- if (memmap[chunk].flags & MMAP_CODE) {
- dst = pop_r(dst, SCRATCH2);
- }
- } else {
- dst = mov_ir(dst, (intptr_t)memmap[chunk].buffer, SCRATCH2, SZ_PTR);
- dst = mov_rindexr(dst, SCRATCH2, SCRATCH1, 0, SCRATCH1, tmp_size);
- }
- }
- if (size != tmp_size && !is_write) {
- if (memmap[chunk].flags & MMAP_ONLY_EVEN) {
- dst = shl_ir(dst, 8, SCRATCH1, SZ_W);
- dst = mov_ir(dst, 0xFF, SCRATCH1, SZ_B);
- } else {
- dst = or_ir(dst, 0xFF00, SCRATCH1, SZ_W);
- }
- }
- }
- if (is_write && (memmap[chunk].flags & MMAP_CODE)) {
- dst = mov_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- dst = shr_ir(dst, 11, SCRATCH1, SZ_D);
- dst = bt_rrdisp32(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, ram_code_flags), SZ_D);
- code_ptr not_code = dst+1;
- dst = jcc(dst, CC_NC, dst+2);
- dst = call(dst, opts->save_context);
-#ifdef X86_32
- dst = push_r(dst, CONTEXT);
- dst = push_r(dst, SCRATCH2);
-#endif
- dst = call(dst, (code_ptr)m68k_handle_code_write);
-#ifdef X86_32
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- dst = call(dst, opts->load_context);
- *not_code = dst - (not_code+1);
- }
- dst = retn(dst);
- } else if (cfun) {
- dst = call(dst, opts->save_context);
-#ifdef X86_64
- if (is_write) {
- if (SCRATCH2 != RDI) {
- dst = mov_rr(dst, SCRATCH2, RDI, SZ_D);
- }
- dst = mov_rr(dst, SCRATCH1, RDX, size);
- } else {
- dst = push_r(dst, CONTEXT);
- dst = mov_rr(dst, SCRATCH1, RDI, SZ_D);
- }
- dst = test_ir(dst, 8, RSP, SZ_D);
- code_ptr adjust_rsp = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, cfun);
- code_ptr no_adjust = dst+1;
- dst = jmp(dst, dst+2);
- *adjust_rsp = dst - (adjust_rsp + 1);
- dst = sub_ir(dst, 8, RSP, SZ_PTR);
- dst = call(dst, cfun);
- dst = add_ir(dst, 8, RSP, SZ_PTR);
- *no_adjust = dst - (no_adjust+1);
-#else
- if (is_write) {
- dst = push_r(dst, SCRATCH1);
- } else {
- dst = push_r(dst, CONTEXT);//save CONTEXT for later
- }
- dst = push_r(dst, CONTEXT);
- dst = push_r(dst, is_write ? SCRATCH2 : SCRATCH1);
- dst = call(dst, cfun);
- dst = add_ir(dst, is_write ? 12 : 8, RSP, SZ_D);
-#endif
- if (is_write) {
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- } else {
- dst = pop_r(dst, CONTEXT);
- dst = mov_rr(dst, RAX, SCRATCH1, size);
- }
- dst = jmp(dst, opts->load_context);
- } else {
- //Not sure the best course of action here
- if (!is_write) {
- dst = mov_ir(dst, size == SZ_B ? 0xFF : 0xFFFF, SCRATCH1, size);
- }
- dst = retn(dst);
- }
- if (lb_jcc) {
- *lb_jcc = dst - (lb_jcc+1);
- lb_jcc = NULL;
- }
- if (ub_jcc) {
- *ub_jcc = dst - (ub_jcc+1);
- ub_jcc = NULL;
- }
- }
- if (!is_write) {
- dst = mov_ir(dst, size == SZ_B ? 0xFF : 0xFFFF, SCRATCH1, size);
- }
- dst = retn(dst);
- opts->cur_code = dst;
- return start;
-}
-
-void init_x86_68k_opts(x86_68k_options * opts, memmap_chunk * memmap, uint32_t num_chunks)
-{
- memset(opts, 0, sizeof(*opts));
- for (int i = 0; i < 8; i++)
- opts->dregs[i] = opts->aregs[i] = -1;
-#ifdef X86_64
- opts->dregs[0] = R10;
- opts->dregs[1] = R11;
- opts->dregs[2] = R12;
- opts->dregs[3] = R8;
- opts->aregs[0] = R13;
- opts->aregs[1] = R14;
- opts->aregs[2] = R9;
- opts->aregs[7] = R15;
-
- opts->flag_regs[0] = -1;
- opts->flag_regs[1] = RBX;
- opts->flag_regs[2] = RDX;
- opts->flag_regs[3] = BH;
- opts->flag_regs[4] = DH;
-#else
- opts->dregs[0] = RDX;
- opts->aregs[7] = RDI;
-
- for (int i = 0; i < 5; i++)
- opts->flag_regs[i] = -1;
-#endif
-
-
- opts->gen.native_code_map = malloc(sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
- memset(opts->gen.native_code_map, 0, sizeof(native_map_slot) * NATIVE_MAP_CHUNKS);
- opts->gen.deferred = NULL;
- size_t size = 1024 * 1024;
- opts->gen.cur_code = alloc_code(&size);
- opts->gen.code_end = opts->gen.cur_code + size;
- opts->gen.ram_inst_sizes = malloc(sizeof(code_ptr) * 64);
- memset(opts->gen.ram_inst_sizes, 0, sizeof(code_ptr) * 64);
-
- code_ptr dst = opts->gen.cur_code;
-
- opts->gen.save_context = dst;
- for (int i = 0; i < 5; i++)
- if (opts->flag_regs[i] >= 0) {
- dst = mov_rrdisp8(dst, opts->flag_regs[i], CONTEXT, offsetof(m68k_context, flags) + i, SZ_B);
- }
- for (int i = 0; i < 8; i++)
- {
- if (opts->dregs[i] >= 0) {
- dst = mov_rrdisp8(dst, opts->dregs[i], CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, SZ_D);
- }
- if (opts->aregs[i] >= 0) {
- dst = mov_rrdisp8(dst, opts->aregs[i], CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, SZ_D);
- }
- }
- dst = mov_rrdisp8(dst, CYCLES, CONTEXT, offsetof(m68k_context, current_cycle), SZ_D);
- dst = retn(dst);
-
- opts->gen.load_context = dst;
- for (int i = 0; i < 5; i++)
- if (opts->flag_regs[i] >= 0) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + i, opts->flag_regs[i], SZ_B);
- }
- for (int i = 0; i < 8; i++)
- {
- if (opts->dregs[i] >= 0) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, dregs) + sizeof(uint32_t) * i, opts->dregs[i], SZ_D);
- }
- if (opts->aregs[i] >= 0) {
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * i, opts->aregs[i], SZ_D);
- }
- }
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, current_cycle), CYCLES, SZ_D);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, target_cycle), LIMIT, SZ_D);
- dst = retn(dst);
-
- opts->start_context = (start_fun)dst;
-#ifdef X86_64
- if (SCRATCH2 != RDI) {
- dst = mov_rr(dst, RDI, SCRATCH2, SZ_PTR);
- }
- //save callee save registers
- dst = push_r(dst, RBP);
- dst = push_r(dst, R12);
- dst = push_r(dst, R13);
- dst = push_r(dst, R14);
- dst = push_r(dst, R15);
-#else
- //save callee save registers
- dst = push_r(dst, RBP);
- dst = push_r(dst, RBX);
- dst = push_r(dst, RSI);
- dst = push_r(dst, RDI);
-
- dst = mov_rdisp8r(dst, RSP, 20, SCRATCH2, SZ_D);
- dst = mov_rdisp8r(dst, RSP, 24, CONTEXT, SZ_D);
-#endif
- dst = call(dst, opts->gen.load_context);
- dst = call_r(dst, SCRATCH2);
- dst = call(dst, opts->gen.save_context);
-#ifdef X86_64
- //restore callee save registers
- dst = pop_r(dst, R15);
- dst = pop_r(dst, R14);
- dst = pop_r(dst, R13);
- dst = pop_r(dst, R12);
- dst = pop_r(dst, RBP);
-#else
- dst = pop_r(dst, RDI);
- dst = pop_r(dst, RSI);
- dst = pop_r(dst, RBX);
- dst = pop_r(dst, RBP);
-#endif
- dst = retn(dst);
-
- opts->native_addr = dst;
- dst = call(dst, opts->gen.save_context);
- dst = push_r(dst, CONTEXT);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR); //move context to 1st arg reg
- dst = mov_rr(dst, SCRATCH1, RSI, SZ_D); //move address to 2nd arg reg
-#else
- dst = push_r(dst, SCRATCH1);
- dst = push_r(dst, CONTEXT);
-#endif
- dst = call(dst, (code_ptr)get_native_address_trans);
-#ifdef X86_32
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
- dst = mov_rr(dst, RAX, SCRATCH1, SZ_PTR); //move result to scratch reg
- dst = pop_r(dst, CONTEXT);
- dst = call(dst, opts->gen.load_context);
- dst = retn(dst);
-
- opts->native_addr_and_sync = dst;
- dst = call(dst, opts->gen.save_context);
- dst = push_r(dst, SCRATCH1);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
- dst = xor_rr(dst, RSI, RSI, SZ_D);
- dst = test_ir(dst, 8, RSP, SZ_PTR); //check stack alignment
- code_ptr do_adjust_rsp = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, (code_ptr)sync_components);
- code_ptr no_adjust_rsp = dst+1;
- dst = jmp(dst, dst+2);
- *do_adjust_rsp = dst - (do_adjust_rsp+1);
- dst = sub_ir(dst, 8, RSP, SZ_PTR);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_PTR);
- *no_adjust_rsp = dst - (no_adjust_rsp+1);
- dst = pop_r(dst, RSI);
- dst = push_r(dst, RAX);
- dst = mov_rr(dst, RAX, RDI, SZ_PTR);
- dst = call(dst, (code_ptr)get_native_address_trans);
-#else
- //TODO: Add support for pushing a constant in gen_x86
- dst = xor_rr(dst, RAX, RAX, SZ_D);
- dst = push_r(dst, RAX);
- dst = push_r(dst, CONTEXT);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_D);
- dst = pop_r(dst, RSI); //restore saved address from SCRATCH1
- dst = push_r(dst, RAX); //save context pointer for later
- dst = push_r(dst, RSI); //2nd arg -- address
- dst = push_r(dst, RAX); //1st arg -- context pointer
- dst = call(dst, (code_ptr)get_native_address_trans);
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
-
- dst = mov_rr(dst, RAX, SCRATCH1, SZ_PTR); //move result to scratch reg
- dst = pop_r(dst, CONTEXT);
- dst = call(dst, opts->gen.load_context);
- dst = retn(dst);
-
- opts->gen.handle_cycle_limit = dst;
- dst = cmp_rdisp8r(dst, CONTEXT, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
- code_ptr skip_sync = dst+1;
- dst = jcc(dst, CC_C, dst+2);
- opts->do_sync = dst;
- dst = push_r(dst, SCRATCH1);
- dst = push_r(dst, SCRATCH2);
- dst = call(dst, opts->gen.save_context);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
- dst = xor_rr(dst, RSI, RSI, SZ_D);
- dst = test_ir(dst, 8, RSP, SZ_D);
- code_ptr adjust_rsp = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, (code_ptr)sync_components);
- code_ptr no_adjust = dst+1;
- dst = jmp(dst, dst+2);
- *adjust_rsp = dst - (adjust_rsp + 1);
- dst = sub_ir(dst, 8, RSP, SZ_PTR);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_PTR);
- *no_adjust = dst - (no_adjust+1);
-#else
- //TODO: Add support for pushing a constant in gen_x86
- dst = xor_rr(dst, RAX, RAX, SZ_D);
- dst = push_r(dst, RAX);
- dst = push_r(dst, CONTEXT);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- dst = call(dst, opts->gen.load_context);
- dst = pop_r(dst, SCRATCH2);
- dst = pop_r(dst, SCRATCH1);
- *skip_sync = dst - (skip_sync+1);
- dst = retn(dst);
-
- opts->gen.cur_code = dst;
-
- opts->read_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_16);
- opts->read_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, READ_8);
- opts->write_16 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_16);
- opts->write_8 = gen_mem_fun(&opts->gen, memmap, num_chunks, WRITE_8);
-
- dst = opts->gen.cur_code;
-
- opts->read_32 = dst;
- dst = push_r(dst, SCRATCH1);
- dst = call(dst, opts->read_16);
- dst = mov_rr(dst, SCRATCH1, SCRATCH2, SZ_W);
- dst = pop_r(dst, SCRATCH1);
- dst = push_r(dst, SCRATCH2);
- dst = add_ir(dst, 2, SCRATCH1, SZ_D);
- dst = call(dst, opts->read_16);
- dst = pop_r(dst, SCRATCH2);
- dst = movzx_rr(dst, SCRATCH1, SCRATCH1, SZ_W, SZ_D);
- dst = shl_ir(dst, 16, SCRATCH2, SZ_D);
- dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_D);
- dst = retn(dst);
-
- opts->write_32_lowfirst = dst;
- dst = push_r(dst, SCRATCH2);
- dst = push_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH2, SZ_D);
- dst = call(dst, opts->write_16);
- dst = pop_r(dst, SCRATCH1);
- dst = pop_r(dst, SCRATCH2);
- dst = shr_ir(dst, 16, SCRATCH1, SZ_D);
- dst = jmp(dst, opts->write_16);
-
- opts->write_32_highfirst = dst;
- dst = push_r(dst, SCRATCH1);
- dst = push_r(dst, SCRATCH2);
- dst = shr_ir(dst, 16, SCRATCH1, SZ_D);
- dst = call(dst, opts->write_16);
- dst = pop_r(dst, SCRATCH2);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, 2, SCRATCH2, SZ_D);
- dst = jmp(dst, opts->write_16);
-
- opts->get_sr = dst;
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, status), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 8, SCRATCH1, SZ_W);
- if (opts->flag_regs[FLAG_X] >= 0) {
- dst = mov_rr(dst, opts->flag_regs[FLAG_X], SCRATCH1, SZ_B);
- } else {
- int8_t offset = offsetof(m68k_context, flags);
- if (offset) {
- dst = mov_rdisp8r(dst, CONTEXT, offset, SCRATCH1, SZ_B);
- } else {
- dst = mov_rindr(dst, CONTEXT, SCRATCH1, SZ_B);
- }
- }
- for (int flag = FLAG_N; flag <= FLAG_C; flag++)
- {
- dst = shl_ir(dst, 1, SCRATCH1, SZ_B);
- if (opts->flag_regs[flag] >= 0) {
- dst = or_rr(dst, opts->flag_regs[flag], SCRATCH1, SZ_B);
- } else {
- dst = or_rdisp8r(dst, CONTEXT, offsetof(m68k_context, flags) + flag, SCRATCH1, SZ_B);
- }
- }
- dst = retn(dst);
-
- opts->set_sr = dst;
- for (int flag = FLAG_C; flag >= FLAG_X; flag--)
- {
- dst = rcr_ir(dst, 1, SCRATCH1, SZ_B);
- if (opts->flag_regs[flag] >= 0) {
- dst = setcc_r(dst, CC_C, opts->flag_regs[flag]);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, offset);
- } else {
- dst = setcc_rind(dst, CC_C, CONTEXT);
- }
- }
- }
- dst = shr_ir(dst, 8, SCRATCH1, SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, status), SZ_B);
- dst = retn(dst);
-
- opts->set_ccr = dst;
- for (int flag = FLAG_C; flag >= FLAG_X; flag--)
- {
- dst = rcr_ir(dst, 1, SCRATCH1, SZ_B);
- if (opts->flag_regs[flag] >= 0) {
- dst = setcc_r(dst, CC_C, opts->flag_regs[flag]);
- } else {
- int8_t offset = offsetof(m68k_context, flags) + flag;
- if (offset) {
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, offset);
- } else {
- dst = setcc_rind(dst, CC_C, CONTEXT);
- }
- }
- }
- dst = retn(dst);
-
- opts->gen.handle_cycle_limit_int = dst;
- dst = cmp_rdisp8r(dst, CONTEXT, offsetof(m68k_context, int_cycle), CYCLES, SZ_D);
- code_ptr do_int = dst+1;
- dst = jcc(dst, CC_NC, dst+2);
- dst = cmp_rdisp8r(dst, CONTEXT, offsetof(m68k_context, sync_cycle), CYCLES, SZ_D);
- skip_sync = dst+1;
- dst = jcc(dst, CC_C, dst+2);
- dst = call(dst, opts->gen.save_context);
-#ifdef X86_64
- dst = mov_rr(dst, CONTEXT, RDI, SZ_PTR);
- dst = mov_rr(dst, SCRATCH1, RSI, SZ_D);
- dst = test_ir(dst, 8, RSP, SZ_D);
- adjust_rsp = dst+1;
- dst = jcc(dst, CC_NZ, dst+2);
- dst = call(dst, (code_ptr)sync_components);
- no_adjust = dst+1;
- dst = jmp(dst, dst+2);
- *adjust_rsp = dst - (adjust_rsp + 1);
- dst = sub_ir(dst, 8, RSP, SZ_PTR);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_PTR);
- *no_adjust = dst - (no_adjust+1);
-#else
- dst = push_r(dst, SCRATCH1);
- dst = push_r(dst, CONTEXT);
- dst = call(dst, (code_ptr)sync_components);
- dst = add_ir(dst, 8, RSP, SZ_D);
-#endif
- dst = mov_rr(dst, RAX, CONTEXT, SZ_PTR);
- dst = jmp(dst, opts->gen.load_context);
- *skip_sync = dst - (skip_sync+1);
- dst = retn(dst);
- *do_int = dst - (do_int+1);
- //set target cycle to sync cycle
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, sync_cycle), LIMIT, SZ_D);
- //swap USP and SSP if not already in supervisor mode
- dst = bt_irdisp8(dst, 5, CONTEXT, offsetof(m68k_context, status), SZ_B);
- code_ptr already_supervisor = dst+1;
- dst = jcc(dst, CC_C, dst+2);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SCRATCH2, SZ_D);
- dst = mov_rrdisp8(dst, opts->aregs[7], CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- dst = mov_rr(dst, SCRATCH2, opts->aregs[7], SZ_D);
- *already_supervisor = dst - (already_supervisor+1);
- //save PC
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_lowfirst);
- //save status register
- dst = sub_ir(dst, 2, opts->aregs[7], SZ_D);
- dst = call(dst, opts->get_sr);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_16);
- //update status register
- dst = and_irdisp8(dst, 0xF8, CONTEXT, offsetof(m68k_context, status), SZ_B);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, int_num), SCRATCH1, SZ_B);
- dst = or_ir(dst, 0x20, SCRATCH1, SZ_B);
- dst = or_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, status), SZ_B);
- //calculate interrupt vector address
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, int_num), SCRATCH1, SZ_D);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(m68k_context, int_ack), SZ_W);
- dst = shl_ir(dst, 2, SCRATCH1, SZ_D);
- dst = add_ir(dst, 0x60, SCRATCH1, SZ_D);
- dst = call(dst, opts->read_32);
- dst = call(dst, opts->native_addr_and_sync);
- dst = cycles(dst, 24);
- //discard function return address
- dst = pop_r(dst, SCRATCH2);
- dst = jmp_r(dst, SCRATCH1);
-
- opts->trap = dst;
- dst = push_r(dst, SCRATCH2);
- //swap USP and SSP if not already in supervisor mode
- dst = bt_irdisp8(dst, 5, CONTEXT, offsetof(m68k_context, status), SZ_B);
- already_supervisor = dst+1;
- dst = jcc(dst, CC_C, dst+2);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SCRATCH2, SZ_D);
- dst = mov_rrdisp8(dst, opts->aregs[7], CONTEXT, offsetof(m68k_context, aregs) + sizeof(uint32_t) * 8, SZ_D);
- dst = mov_rr(dst, SCRATCH2, opts->aregs[7], SZ_D);
- *already_supervisor = dst - (already_supervisor+1);
- //save PC
- dst = sub_ir(dst, 4, opts->aregs[7], SZ_D);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_32_lowfirst);
- //save status register
- dst = sub_ir(dst, 2, opts->aregs[7], SZ_D);
- dst = call(dst, opts->get_sr);
- dst = mov_rr(dst, opts->aregs[7], SCRATCH2, SZ_D);
- dst = call(dst, opts->write_16);
- //set supervisor bit
- dst = or_irdisp8(dst, 0x20, CONTEXT, offsetof(m68k_context, status), SZ_B);
- //calculate vector address
- dst = pop_r(dst, SCRATCH1);
- dst = shl_ir(dst, 2, SCRATCH1, SZ_D);
- dst = call(dst, opts->read_32);
- dst = call(dst, opts->native_addr_and_sync);
- dst = cycles(dst, 18);
- dst = jmp_r(dst, SCRATCH1);
-
- opts->gen.cur_code = dst;
-}
-
-void init_68k_context(m68k_context * context, native_map_slot * native_code_map, void * opts)
-{
- memset(context, 0, sizeof(m68k_context));
- context->native_code_map = native_code_map;
- context->options = opts;
- context->int_cycle = 0xFFFFFFFF;
- context->status = 0x27;
-}
-
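
The read_32 and write_32_* thunks emitted above simply chain the generated 16-bit handlers. A minimal C model of that composition, for reference only; the type and function names (read_16_fun, read_32_model, write_32_lowfirst_model, write_32_highfirst_model) are illustrative and not part of the emitted code:

#include <stdint.h>

typedef uint16_t (*read_16_fun)(uint32_t address);
typedef void (*write_16_fun)(uint32_t address, uint16_t value);

//68K longwords are big-endian: the word at the lower address is the high half
static uint32_t read_32_model(uint32_t address, read_16_fun read_16)
{
	uint32_t high = read_16(address);
	uint32_t low = read_16(address + 2);
	return high << 16 | low;
}

//"lowfirst" writes the low word (at address+2) before the high word,
//matching the order used above when the interrupt/trap paths push the PC
static void write_32_lowfirst_model(uint32_t address, uint32_t value, write_16_fun write_16)
{
	write_16(address + 2, (uint16_t)value);
	write_16(address, (uint16_t)(value >> 16));
}

static void write_32_highfirst_model(uint32_t address, uint32_t value, write_16_fun write_16)
{
	write_16(address, (uint16_t)(value >> 16));
	write_16(address + 2, (uint16_t)value);
}
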
diff --git a/render_sdl.c b/render_sdl.c
index e5f5c96..f50c476 100644
--- a/render_sdl.c
+++ b/render_sdl.c
@@ -220,8 +220,6 @@ void render_init(int width, int height, char * title, uint32_t fps, uint8_t full
fprintf(stderr, "Unable to init SDL: %s\n", SDL_GetError());
exit(1);
}
- atexit(SDL_Quit);
- atexit(render_close_audio);
printf("width: %d, height: %d\n", width, height);
uint32_t flags = SDL_ANYFORMAT;
@@ -250,10 +248,12 @@ void render_init(int width, int height, char * title, uint32_t fps, uint8_t full
screen = SDL_SetVideoMode(width, height, 32, flags);
if (!screen) {
fprintf(stderr, "Unable to get SDL surface: %s\n", SDL_GetError());
+ SDL_Quit();
exit(1);
}
if (!use_gl && screen->format->BytesPerPixel != 2 && screen->format->BytesPerPixel != 4) {
fprintf(stderr, "BlastEm requires a 16-bit or 32-bit surface, SDL returned a %d-bit surface\n", screen->format->BytesPerPixel * 8);
+ SDL_Quit();
exit(1);
}
#ifndef DISABLE_OPENGL
@@ -263,10 +263,12 @@ void render_init(int width, int height, char * title, uint32_t fps, uint8_t full
GLenum res = glewInit();
if (res != GLEW_OK) {
fprintf(stderr, "Initialization of GLEW failed with code %d\n", res);
+ SDL_Quit();
exit(1);
}
if (!GLEW_VERSION_2_0) {
fputs("OpenGL 2.0 is unable, falling back to standard SDL rendering\n", stderr);
+ SDL_Quit();
exit(1);
}
float aspect = (float)width / height;
@@ -327,6 +329,7 @@ void render_init(int width, int height, char * title, uint32_t fps, uint8_t full
if (SDL_OpenAudio(&desired, &actual) < 0) {
fprintf(stderr, "Unable to open SDL audio: %s\n", SDL_GetError());
+ SDL_Quit();
exit(1);
}
buffer_samples = actual.samples;
@@ -345,6 +348,9 @@ void render_init(int width, int height, char * title, uint32_t fps, uint8_t full
}
}
SDL_JoystickEventState(SDL_ENABLE);
+
+ atexit(SDL_Quit);
+ atexit(render_close_audio);
}
#ifndef DISABLE_OPENGL
void render_context_gl(vdp_context * context)
@@ -364,7 +370,7 @@ void render_context_gl(vdp_context * context)
glBindTexture(GL_TEXTURE_2D, (context->regs[REG_MODE_4] & BIT_INTERLACE) ? textures[1] : textures[2]);
glUniform1i(un_textures[1], 1);
- glUniform1f(un_width, context->latched_mode & BIT_H40 ? 320.0f : 256.0f);
+ glUniform1f(un_width, context->regs[REG_MODE_4] & BIT_H40 ? 320.0f : 256.0f);
glBindBuffer(GL_ARRAY_BUFFER, buffers[0]);
glVertexAttribPointer(at_pos, 2, GL_FLOAT, GL_FALSE, sizeof(GLfloat[2]), (void *)0);
diff --git a/runtime.S b/runtime.S
index caac541..5595eb1 100644
--- a/runtime.S
+++ b/runtime.S
@@ -1,62 +1,16 @@
- .global bcd_add
-bcd_add:
- xchg %rax, %rdi
-
- mov %cl, %ch
- mov %al, %ah
- and $0xF, %ch
- and $0xF, %ah
- and $0xF0, %cl
- and $0xF0, %al
- add %ah, %ch
- cmp $10, %ch
- jb no_adjust
- add $6, %ch
-no_adjust:
- add %ch, %al
- add %al, %cl
- mov $0, %ch
- jc def_adjust
- cmp $0xA0, %cl
- jb no_adjust_h
-def_adjust:
- add $0x60, %cl
- mov $1, %ch
-no_adjust_h:
-
- mov %rdi, %rax
- ret
-
- .global bcd_sub
-bcd_sub:
- xchg %rax, %rdi
-
- mov %cl, %ch
- mov %al, %ah
- and $0xF, %ch
- and $0xF, %ah
- and $0xF0, %cl
- and $0xF0, %al
- sub %ah, %ch
- cmp $10, %ch
- jb no_adjusts
- sub $6, %ch
-no_adjusts:
- add %ch, %cl
- sub %al, %cl
- mov $0, %ch
- jc def_adjusts
- cmp $0xA0, %cl
- jb no_adjust_hs
-def_adjusts:
- sub $0x60, %cl
- mov $1, %ch
-no_adjust_hs:
-
- mov %rdi, %rax
- ret
+invalid_msg:
+ .asciz "Invalid instruction at %X\n"
+
+ .global m68k_invalid
+m68k_invalid:
+ lea invalid_msg(%rip), %rdi
+ mov %ecx, %esi
+ xor %rax, %rax
+ call printf
+ mov $1, %rdi
+ call exit
diff --git a/runtime_32.S b/runtime_32.S
index 1f0c503..50117f1 100644
--- a/runtime_32.S
+++ b/runtime_32.S
@@ -12,63 +12,6 @@ m68k_invalid:
push $1
call exit
- .global bcd_add
-bcd_add:
- xchg %eax, %edi
-
- mov %cl, %ch
- mov %al, %ah
- and $0xF, %ch
- and $0xF, %ah
- and $0xF0, %cl
- and $0xF0, %al
- add %ah, %ch
- cmp $10, %ch
- jb no_adjust
- add $6, %ch
-no_adjust:
- add %ch, %al
- add %al, %cl
- mov $0, %ch
- jc def_adjust
- cmp $0xA0, %cl
- jb no_adjust_h
-def_adjust:
- add $0x60, %cl
- mov $1, %ch
-no_adjust_h:
-
- mov %edi, %eax
- ret
-
- .global bcd_sub
-bcd_sub:
- xchg %eax, %edi
-
- mov %cl, %ch
- mov %al, %ah
- and $0xF, %ch
- and $0xF, %ah
- and $0xF0, %cl
- and $0xF0, %al
- sub %ah, %ch
- cmp $10, %ch
- jb no_adjusts
- sub $6, %ch
-no_adjusts:
- add %ch, %cl
- sub %al, %cl
- mov $0, %ch
- jc def_adjusts
- cmp $0xA0, %cl
- jb no_adjust_hs
-def_adjusts:
- sub $0x60, %cl
- mov $1, %ch
-no_adjust_hs:
-
- mov %edi, %eax
- ret
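
The bcd_add/bcd_sub helpers removed from both runtime files implemented packed-BCD byte arithmetic with decimal adjust (result in %cl, carry flag in %ch). A rough C equivalent of the addition case for valid BCD inputs — a sketch for reference, not part of the patch, with bcd_add_model as an illustrative name:

#include <stdint.h>

//add two packed-BCD bytes; returns the adjusted sum, *carry receives the decimal carry
static uint8_t bcd_add_model(uint8_t a, uint8_t b, uint8_t *carry)
{
	uint16_t low = (a & 0xF) + (b & 0xF);
	if (low >= 10) {
		low += 6;        //adjust the low digit
	}
	uint16_t sum = (a & 0xF0) + (b & 0xF0) + low;
	*carry = 0;
	if (sum >= 0xA0) {
		sum += 0x60;     //adjust the high digit
		*carry = 1;
	}
	return (uint8_t)sum; //truncated to a byte, as in the removed assembly
}
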
diff --git a/tern.c b/tern.c
index f61e2aa..73ea08d 100644
--- a/tern.c
+++ b/tern.c
@@ -122,4 +122,14 @@ tern_node * tern_insert_ptr(tern_node * head, char * key, void * value)
return tern_insert(head, key, val);
}
-
+char * tern_int_key(uint32_t key, char * buf)
+{
+ char * cur = buf;
+ while (key)
+ {
+ *(cur++) = (key & 0x7F) + 1;
+ key >>= 7;
+ }
+ *cur = 0;
+ return buf;
+}
diff --git a/tern.h b/tern.h
index e95e0c9..cdf6948 100644
--- a/tern.h
+++ b/tern.h
@@ -8,6 +8,8 @@
#include <stdint.h>
+#define MAX_INT_KEY_SIZE (sizeof(uint32_t) + 2)
+
typedef union {
void *ptrval;
intptr_t intval;
@@ -31,5 +33,6 @@ tern_node * tern_insert_int(tern_node * head, char * key, intptr_t value);
void * tern_find_ptr_default(tern_node * head, char * key, void * def);
void * tern_find_ptr(tern_node * head, char * key);
tern_node * tern_insert_ptr(tern_node * head, char * key, void * value);
+char * tern_int_key(uint32_t key, char * buf);
#endif //TERN_H_
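
tern_int_key packs a 32-bit key into a NUL-terminated string, 7 bits per output byte with each byte offset by 1 so no interior byte is zero; MAX_INT_KEY_SIZE covers the worst case of five encoded bytes plus the terminator. One possible way to combine it with the existing tree API — the wrapper names insert_by_int and find_by_int are illustrative, not part of the patch:

#include "tern.h"

//insert a value keyed by a 32-bit integer such as a guest address
static tern_node *insert_by_int(tern_node *head, uint32_t key, void *value)
{
	char buf[MAX_INT_KEY_SIZE];
	return tern_insert_ptr(head, tern_int_key(key, buf), value);
}

//look the value back up using the same encoding
static void *find_by_int(tern_node *head, uint32_t key)
{
	char buf[MAX_INT_KEY_SIZE];
	return tern_find_ptr(head, tern_int_key(key, buf));
}
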
diff --git a/test_x86.c b/test_x86.c
index e13a814..588ef5d 100644
--- a/test_x86.c
+++ b/test_x86.c
@@ -1,10 +1,10 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "gen_x86.h"
-#include "m68k_to_x86.h"
+#include "m68k_core.h"
#include <stdio.h>
#include <stddef.h>
diff --git a/testcases.txt b/testcases.txt
index b7f7d34..c0f2692 100644
--- a/testcases.txt
+++ b/testcases.txt
@@ -1,87 +1,88 @@
Name Sizes Src Modes Dst Modes
-#add bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#add bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#adda wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
-#addi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#addq bwl #(1-8) d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#addx bwl d d
-#addx bwl -(a) -(a)
-#and bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#and bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#andi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#asl bwl d;#(1-8) d
-#asr bwl d;#(1-8) d
-#lsl bwl d;#(1-8) d
-#lsr bwl d;#(1-8) d
-#sub bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#sub bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#suba wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
-#subi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#subq bwl #(1-8) d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#subx bwl d d
-#subx bwl -(a) -(a)
-#bchg b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#bchg l d;#(0-255) d
-#bset b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#bset l d;#(0-255) d
-#bclr b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#bclr l d;#(0-255) d
-#btst b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#btst l d;#(0-255) d
-#rol bwl d;#(1-8) d
-#ror bwl d;#(1-8) d
-#abcd b d d
-#abcd b -(a) -(a)
-#sbcd b d d
-#sbcd b -(a) -(a)
-#muls w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#mulu w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#move bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#movea wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
-#moveq l #(-128-127) d
-#roxl bwl d;#(1-8) d
-#roxr bwl d;#(1-8) d
-#divs w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#divu w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#chk w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#cmp bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#cmpa wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
-#cmpi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#cmpm bwl (a)+ (a)+
-#eor bwl d d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#eori bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#exg l d d;a
-#exg l a a
-#link w a #n
-#or bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
-#or bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#ori bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#clr bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#ext wl d
-#neg bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#negx bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#not bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#pea l (a);(n,a);(n,a,x);(n).w;(n).l;(n,pc);(n,pc,x)
-#rol w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#ror w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#roxl w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#roxr w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#st b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sf b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#shi b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sls b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#scc b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#scs b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sne b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#seq b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#svc b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#svs b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#spl b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#smi b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sge b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#slt b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sgt b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#sle b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
-#swap w d
+add bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+add bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+adda wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
+addi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+addq bwl #(1-8) d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+addx bwl d d
+addx bwl -(a) -(a)
+and bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+and bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+andi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+asl bwl d;#(1-8) d
+asr bwl d;#(1-8) d
+lsl bwl d;#(1-8) d
+lsr bwl d;#(1-8) d
+sub bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+sub bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+suba wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
+subi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+subq bwl #(1-8) d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+subx bwl d d
+subx bwl -(a) -(a)
+bchg b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+bchg l d;#(0-255) d
+bset b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+bset l d;#(0-255) d
+bclr b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+bclr l d;#(0-255) d
+btst b d;#(0-255) (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+btst l d;#(0-255) d
+rol bwl d;#(1-8) d
+ror bwl d;#(1-8) d
+abcd b d d
+abcd b -(a) -(a)
+sbcd b d d
+sbcd b -(a) -(a)
+muls w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+mulu w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+move bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+movea wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
+moveq l #(-128-127) d
+roxl bwl d;#(1-8) d
+roxr bwl d;#(1-8) d
+divs w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+divu w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+chk w d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+cmp bwl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+cmpa wl d;a;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) a
+cmpi bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+cmpm bwl (a)+ (a)+
+eor bwl d d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+eori bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+exg l d d;a
+exg l a a
+link w a #n
+or bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l;#n;(n,pc);(n,pc,x) d
+or bwl d (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+ori bwl #n d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+clr bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+ext wl d
+neg bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+negx bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+not bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+pea l (a);(n,a);(n,a,x);(n).w;(n).l;(n,pc);(n,pc,x)
+rol w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+ror w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+roxl w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+roxr w (a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+st b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sf b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+shi b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sls b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+scc b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+scs b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sne b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+seq b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+svc b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+svs b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+spl b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+smi b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sge b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+slt b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sgt b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+sle b d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+swap w d
tst bwl d;(a);(a)+;-(a);(n,a);(n,a,x);(n).w;(n).l
+lea l (a);(n,a);(n,a,x);(n).w;(n).l;(n,pc);(n,pc,x) a
diff --git a/trans.c b/trans.c
index ac4bf22..2f98e91 100644
--- a/trans.c
+++ b/trans.c
@@ -1,10 +1,10 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "68kinst.h"
-#include "m68k_to_x86.h"
+#include "m68k_core.h"
#include "mem.h"
#include <stdio.h>
#include <stdlib.h>
@@ -24,14 +24,15 @@ int main(int argc, char ** argv)
unsigned short *filebuf;
char disbuf[1024];
unsigned short * cur;
- x86_68k_options opts;
+ m68k_options opts;
m68k_context context;
FILE * f = fopen(argv[1], "rb");
fseek(f, 0, SEEK_END);
filesize = ftell(f);
fseek(f, 0, SEEK_SET);
- filebuf = malloc(filesize > 0x400000 ? filesize : 0x400000);
- fread(filebuf, 2, filesize/2, f);
+ filebuf = malloc(0x400000);
+ memset(filebuf, 0, 0x400000);
+ fread(filebuf, 2, filesize/2 > 0x200000 ? 0x200000 : filesize/2, f);
fclose(f);
for(cur = filebuf; cur - filebuf < (filesize/2); ++cur)
{
@@ -43,14 +44,15 @@ int main(int argc, char ** argv)
memmap[0].mask = 0xFFFFFF;
memmap[0].flags = MMAP_READ;
memmap[0].buffer = filebuf;
-
+
memmap[1].start = 0xE00000;
memmap[1].end = 0x1000000;
memmap[1].mask = 0xFFFF;
memmap[1].flags = MMAP_READ | MMAP_WRITE | MMAP_CODE;
memmap[1].buffer = malloc(64 * 1024);
- init_x86_68k_opts(&opts, memmap, 2);
- init_68k_context(&context, opts.native_code_map, &opts);
+ memset(memmap[1].buffer, 0, 64 * 1024);
+ init_m68k_opts(&opts, memmap, 2);
+ init_68k_context(&context, opts.gen.native_code_map, &opts);
context.mem_pointers[0] = memmap[0].buffer;
context.mem_pointers[1] = memmap[1].buffer;
context.target_cycle = context.sync_cycle = 0x80000000;
diff --git a/transz80.c b/transz80.c
index a006a92..eec1736 100644
--- a/transz80.c
+++ b/transz80.c
@@ -38,7 +38,7 @@ int main(int argc, char ** argv)
{
long filesize;
uint8_t *filebuf;
- x86_z80_options opts;
+ z80_options opts;
z80_context context;
if (argc < 2) {
fputs("usage: transz80 zrom [cartrom]\n", stderr);
@@ -70,7 +70,7 @@ int main(int argc, char ** argv)
*cur = (*cur >> 8) | (*cur << 8);
}
}
- init_x86_z80_opts(&opts);
+ init_z80_opts(&opts);
init_z80_context(&context, &opts);
//Z80 RAM
context.mem_pointers[0] = z80_ram;
diff --git a/vdp.c b/vdp.c
index 5cf5797..710d2ab 100644
--- a/vdp.c
+++ b/vdp.c
@@ -9,8 +9,8 @@
#include <string.h>
#include "render.h"
-#define NTSC_ACTIVE 225
-#define PAL_ACTIVE 241
+#define NTSC_INACTIVE_START 224
+#define PAL_INACTIVE_START 240
#define BUF_BIT_PRIORITY 0x40
#define MAP_BIT_PRIORITY 0x8000
#define MAP_BIT_H_FLIP 0x800
@@ -22,14 +22,17 @@
#define MCLKS_SLOT_H40 16
#define MCLKS_SLOT_H32 20
-#define VINT_CYCLE_H40 (21*MCLKS_SLOT_H40+332+9*MCLKS_SLOT_H40) //21 slots before HSYNC, 16 during, 10 after
-#define VINT_CYCLE_H32 ((33+20+7)*MCLKS_SLOT_H32) //33 slots before HSYNC, 20 during, 7 after TODO: confirm final number
-#define HSYNC_SLOT_H40 21
-#define MCLK_WEIRD_END (HSYNC_SLOT_H40*MCLKS_SLOT_H40 + 332)
-#define SLOT_WEIRD_END (HSYNC_SLOT_H40+17)
+#define VINT_SLOT_H40 4 //21 slots before HSYNC, 16 during, 10 after
+#define VINT_SLOT_H32 23 //33 slots before HSYNC, 20 during, 7 after TODO: confirm final number
+#define HSYNC_SLOT_H40 240
+#define HSYNC_END_H40 (240+17)
#define HSYNC_END_H32 (33 * MCLKS_SLOT_H32)
-#define HBLANK_CLEAR_H40 (MCLK_WEIRD_END+61*4)
-#define HBLANK_CLEAR_H32 (HSYNC_END_H32 + 46*5)
+#define HBLANK_START_H40 178 //should be 179 according to Nemesis, but 178 seems to fit slightly better with my test ROM results
+#define HBLANK_END_H40 0 //should be 5.5 according to Nemesis, but 0 seems to fit better with my test ROM results
+#define HBLANK_START_H32 233 //should be 147 according to Nemesis which is very different from my test ROM result
+#define HBLANK_END_H32 0 //should be 5 according to Nemesis, but 0 seems to fit better with my test ROM results
+#define LINE_CHANGE_H40 165
+#define LINE_CHANGE_H32 132
#define FIFO_LATENCY 3
int32_t color_map[1 << 12];
@@ -45,7 +48,7 @@ uint8_t debug_base[][3] = {
uint8_t color_map_init_done;
-void init_vdp_context(vdp_context * context)
+void init_vdp_context(vdp_context * context, uint8_t region_pal)
{
memset(context, 0, sizeof(*context));
context->vdpmem = malloc(VRAM_SIZE);
@@ -132,18 +135,21 @@ void init_vdp_context(vdp_context * context)
context->debugcolors[color] = render_map_color(r, g, b);
}
}
+ if (region_pal) {
+ context->flags2 |= FLAG2_REGION_PAL;
+ }
}
int is_refresh(vdp_context * context, uint32_t slot)
{
- if (context->latched_mode & BIT_H40) {
- return (slot == 37 || slot == 69 || slot == 102 || slot == 133 || slot == 165 || slot == 197 || slot >= 210);
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ return slot == 250 || slot == 26 || slot == 59 || slot == 90 || slot == 122 || slot == 154;
} else {
//TODO: Figure out which slots are refresh when display is off in 32-cell mode
//These numbers are guesses based on H40 numbers
- return (slot == 24 || slot == 56 || slot == 88 || slot == 120 || slot == 152);
+ return slot == 243 || slot == 19 || slot == 51 || slot == 83 || slot == 115;
//The numbers below are the refresh slots during active display
- //return (slot == 66 || slot == 98 || slot == 130 || slot == 162);
+ //return (slot == 29 || slot == 61 || slot == 93 || slot == 125);
}
}
@@ -227,8 +233,8 @@ void vdp_print_reg_explain(vdp_context * context)
context->regs[REG_SCROLL_A], (context->regs[REG_SCROLL_A] & 0x38) << 10,
context->regs[REG_WINDOW], (context->regs[REG_WINDOW] & (context->regs[REG_MODE_4] & BIT_H40 ? 0x3C : 0x3E)) << 10,
context->regs[REG_SCROLL_B], (context->regs[REG_SCROLL_B] & 0x7) << 13,
- context->regs[REG_SAT], (context->regs[REG_SAT] & (context->regs[REG_MODE_4] & BIT_H40 ? 0x3E : 0x3F)) << 9,
- context->regs[REG_HSCROLL], (context->regs[REG_HSCROLL] & 0x1F) << 10);
+ context->regs[REG_SAT], (context->regs[REG_SAT] & (context->regs[REG_MODE_4] & BIT_H40 ? 0x7E : 0x7F)) << 9,
+ context->regs[REG_HSCROLL], (context->regs[REG_HSCROLL] & 0x3F) << 10);
char * sizes[] = {"32", "64", "invalid", "128"};
printf("\n**Misc Group**\n"
"07: %.2X | Backdrop Color: $%X\n"
@@ -239,11 +245,28 @@ void vdp_print_reg_explain(vdp_context * context)
context->regs[REG_HINT], context->regs[REG_HINT],
context->regs[REG_AUTOINC], context->regs[REG_AUTOINC],
context->regs[REG_SCROLL], sizes[context->regs[REG_SCROLL] & 0x3], sizes[context->regs[REG_SCROLL] >> 4 & 0x3]);
+ char * src_types[] = {"68K", "68K", "Copy", "Fill"};
+ printf("\n**DMA Group**\n"
+ "13: %.2X |\n"
+ "14: %.2X | DMA Length: $%.4X words\n"
+ "15: %.2X |\n"
+ "16: %.2X |\n"
+ "17: %.2X | DMA Source Address: $%.6X, Type: %s\n",
+ context->regs[REG_DMALEN_L],
+ context->regs[REG_DMALEN_H], context->regs[REG_DMALEN_H] << 8 | context->regs[REG_DMALEN_L],
+ context->regs[REG_DMASRC_L],
+ context->regs[REG_DMASRC_M],
+ context->regs[REG_DMASRC_H],
+ context->regs[REG_DMASRC_H] << 17 | context->regs[REG_DMASRC_M] << 9 | context->regs[REG_DMASRC_L] << 1,
+ src_types[context->regs[REG_DMASRC_H] >> 6 & 3]);
printf("\n**Internal Group**\n"
"Address: %X\n"
"CD: %X\n"
- "Pending: %s\n",
- context->address, context->cd, (context->flags & FLAG_PENDING) ? "true" : "false");
+ "Pending: %s\n"
+ "VCounter: %d\n"
+ "HCounter: %d\n",
+ context->address, context->cd, (context->flags & FLAG_PENDING) ? "true" : "false",
+ context->vcounter, context->hslot*2);
//TODO: Window Group, DMA Group
}
@@ -269,7 +292,7 @@ void scan_sprite_table(uint32_t line, vdp_context * context)
height_mult = 8;
}
context->sprite_index &= 0x7F;
- if (context->latched_mode & BIT_H40) {
+ if (context->regs[REG_MODE_4] & BIT_H40) {
if (context->sprite_index >= MAX_SPRITES_FRAME) {
context->sprite_index = 0;
return;
@@ -472,7 +495,7 @@ void run_dma_src(vdp_context * context, uint32_t slot)
case 0x40:
if (!slot || !is_refresh(context, slot-1)) {
cur = context->fifo + context->fifo_write;
- cur->cycle = context->cycles + ((context->latched_mode & BIT_H40) ? 16 : 20)*FIFO_LATENCY;
+ cur->cycle = context->cycles + ((context->regs[REG_MODE_4] & BIT_H40) ? 16 : 20)*FIFO_LATENCY;
cur->address = context->address;
cur->value = read_dma_value((context->regs[REG_DMASRC_H] << 16) | (context->regs[REG_DMASRC_M] << 8) | context->regs[REG_DMASRC_L]);
cur->cd = context->cd;
@@ -567,7 +590,7 @@ void read_map_scroll(uint16_t column, uint16_t vsram_off, uint32_t line, uint16_
if ((column >= left_col && column < right_col) || (line >= top_line && line < bottom_line)) {
uint16_t address = context->regs[REG_WINDOW] << 10;
uint16_t line_offset, offset, mask;
- if (context->latched_mode & BIT_H40) {
+ if (context->regs[REG_MODE_4] & BIT_H40) {
address &= 0xF000;
line_offset = (((line) >> vscroll_shift) * 64 * 2) & 0xFFF;
mask = 0x7F;
@@ -893,13 +916,15 @@ void vdp_h40(uint32_t line, uint32_t linecyc, vdp_context * context)
uint32_t mask;
switch(linecyc)
{
+ case 165:
+ case 166:
+ external_slot(context);
+ break;
//sprite render to line buffer starts
- case 0:
- context->cur_slot = MAX_DRAWS-1;
- memset(context->linebuf, 0, LINEBUF_SIZE);
- case 1:
- case 2:
- case 3:
+ case 167:
+ case 168:
+ case 169:
+ case 170:
if (line == 0xFF) {
external_slot(context);
} else {
@@ -907,52 +932,50 @@ void vdp_h40(uint32_t line, uint32_t linecyc, vdp_context * context)
}
break;
//sprite attribute table scan starts
- case 4:
+ case 171:
render_sprite_cells( context);
- context->sprite_index = 0x80;
- context->slot_counter = MAX_SPRITES_LINE;
scan_sprite_table(line, context);
break;
- case 5:
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 11:
- case 12:
- case 13:
- case 14:
- case 15:
- case 16:
- case 17:
- case 18:
- case 19:
- case 20:
+ case 172:
+ case 173:
+ case 174:
+ case 175:
+ case 176:
+ case 177:
+ case 178:
+ case 179:
+ case 180:
+ case 181:
+ case 182:
+ case 229:
+ case 230:
+ case 231:
+ case 232:
+ case 233:
//!HSYNC asserted
- case 21:
- case 22:
+ case 234:
+ case 235:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 23:
+ case 236:
external_slot(context);
break;
- case 24:
- case 25:
- case 26:
- case 27:
- case 28:
- case 29:
- case 30:
- case 31:
- case 32:
- case 33:
- case 34:
+ case 237:
+ case 238:
+ case 239:
+ case 240:
+ case 241:
+ case 242:
+ case 243:
+ case 244:
+ case 245:
+ case 246:
+ case 247:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 35:
+ case 248:
address = (context->regs[REG_HSCROLL] & 0x3F) << 10;
mask = 0;
if (context->regs[REG_MODE_3] & 0x2) {
@@ -967,41 +990,41 @@ void vdp_h40(uint32_t line, uint32_t linecyc, vdp_context * context)
context->hscroll_b = context->vdpmem[address+2] << 8 | context->vdpmem[address+3];
//printf("%d: HScroll A: %d, HScroll B: %d\n", line, context->hscroll_a, context->hscroll_b);
break;
- case 36:
+ case 249:
//!HSYNC high
- case 37:
- case 38:
- case 39:
+ case 250:
+ case 251:
+ case 252:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 40:
+ case 253:
read_map_scroll_a(0, line, context);
break;
- case 41:
+ case 254:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 42:
+ case 255:
render_map_1(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 43:
+ case 0:
render_map_2(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 44:
+ case 1:
read_map_scroll_b(0, line, context);
break;
- case 45:
+ case 2:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 46:
+ case 3:
render_map_3(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 47:
+ case 4:
render_map_output(line, 0, context);
scan_sprite_table(line, context);//Just a guess
//reverse context slot counter so it counts the number of sprite slots
@@ -1011,33 +1034,26 @@ void vdp_h40(uint32_t line, uint32_t linecyc, vdp_context * context)
context->sprite_draws = MAX_DRAWS;
context->flags &= (~FLAG_CAN_MASK & ~FLAG_MASKED);
break;
- COLUMN_RENDER_BLOCK(2, 48)
- COLUMN_RENDER_BLOCK(4, 56)
- COLUMN_RENDER_BLOCK(6, 64)
- COLUMN_RENDER_BLOCK_REFRESH(8, 72)
- COLUMN_RENDER_BLOCK(10, 80)
- COLUMN_RENDER_BLOCK(12, 88)
- COLUMN_RENDER_BLOCK(14, 96)
- COLUMN_RENDER_BLOCK_REFRESH(16, 104)
- COLUMN_RENDER_BLOCK(18, 112)
- COLUMN_RENDER_BLOCK(20, 120)
- COLUMN_RENDER_BLOCK(22, 128)
- COLUMN_RENDER_BLOCK_REFRESH(24, 136)
- COLUMN_RENDER_BLOCK(26, 144)
- COLUMN_RENDER_BLOCK(28, 152)
- COLUMN_RENDER_BLOCK(30, 160)
- COLUMN_RENDER_BLOCK_REFRESH(32, 168)
- COLUMN_RENDER_BLOCK(34, 176)
- COLUMN_RENDER_BLOCK(36, 184)
- COLUMN_RENDER_BLOCK(38, 192)
- COLUMN_RENDER_BLOCK_REFRESH(40, 200)
- case 208:
- case 209:
- external_slot(context);
- break;
- default:
- //leftovers from HSYNC clock change nonsense
- break;
+ COLUMN_RENDER_BLOCK(2, 5)
+ COLUMN_RENDER_BLOCK(4, 13)
+ COLUMN_RENDER_BLOCK(6, 21)
+ COLUMN_RENDER_BLOCK_REFRESH(8, 29)
+ COLUMN_RENDER_BLOCK(10, 37)
+ COLUMN_RENDER_BLOCK(12, 45)
+ COLUMN_RENDER_BLOCK(14, 53)
+ COLUMN_RENDER_BLOCK_REFRESH(16, 61)
+ COLUMN_RENDER_BLOCK(18, 69)
+ COLUMN_RENDER_BLOCK(20, 77)
+ COLUMN_RENDER_BLOCK(22, 85)
+ COLUMN_RENDER_BLOCK_REFRESH(24, 93)
+ COLUMN_RENDER_BLOCK(26, 101)
+ COLUMN_RENDER_BLOCK(28, 109)
+ COLUMN_RENDER_BLOCK(30, 117)
+ COLUMN_RENDER_BLOCK_REFRESH(32, 125)
+ COLUMN_RENDER_BLOCK(34, 133)
+ COLUMN_RENDER_BLOCK(36, 141)
+ COLUMN_RENDER_BLOCK(38, 149)
+ COLUMN_RENDER_BLOCK_REFRESH(40, 157)
}
}
@@ -1047,13 +1063,15 @@ void vdp_h32(uint32_t line, uint32_t linecyc, vdp_context * context)
uint32_t mask;
switch(linecyc)
{
+ case 132:
+ case 133:
+ external_slot(context);
+ break;
//sprite render to line buffer starts
- case 0:
- context->cur_slot = MAX_DRAWS_H32-1;
- memset(context->linebuf, 0, LINEBUF_SIZE);
- case 1:
- case 2:
- case 3:
+ case 134:
+ case 135:
+ case 136:
+ case 137:
if (line == 0xFF) {
external_slot(context);
} else {
@@ -1061,46 +1079,44 @@ void vdp_h32(uint32_t line, uint32_t linecyc, vdp_context * context)
}
break;
//sprite attribute table scan starts
- case 4:
+ case 138:
render_sprite_cells( context);
- context->sprite_index = 0x80;
- context->slot_counter = MAX_SPRITES_LINE_H32;
scan_sprite_table(line, context);
break;
- case 5:
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 11:
- case 12:
- case 13:
+ case 139:
+ case 140:
+ case 141:
+ case 142:
+ case 143:
+ case 144:
+ case 145:
+ case 146:
+ case 147:
render_sprite_cells(context);
scan_sprite_table(line, context);
- case 14:
+ case 233:
external_slot(context);
break;
- case 15:
- case 16:
- case 17:
- case 18:
- case 19:
+ case 234:
+ case 235:
+ case 236:
+ case 237:
+ case 238:
//HSYNC start
- case 20:
- case 21:
- case 22:
- case 23:
- case 24:
- case 25:
- case 26:
+ case 239:
+ case 240:
+ case 241:
+ case 242:
+ case 243:
+ case 244:
+ case 245:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 27:
+ case 246:
external_slot(context);
break;
- case 28:
+ case 247:
address = (context->regs[REG_HSCROLL] & 0x3F) << 10;
mask = 0;
if (context->regs[REG_MODE_3] & 0x2) {
@@ -1115,41 +1131,41 @@ void vdp_h32(uint32_t line, uint32_t linecyc, vdp_context * context)
context->hscroll_b = context->vdpmem[address+2] << 8 | context->vdpmem[address+3];
//printf("%d: HScroll A: %d, HScroll B: %d\n", line, context->hscroll_a, context->hscroll_b);
break;
- case 29:
- case 30:
- case 31:
- case 32:
+ case 248:
+ case 249:
+ case 250:
+ case 251:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
//!HSYNC high
- case 33:
+ case 252:
read_map_scroll_a(0, line, context);
break;
- case 34:
+ case 253:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 35:
+ case 254:
render_map_1(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 36:
+ case 255:
render_map_2(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 37:
+ case 0:
read_map_scroll_b(0, line, context);
break;
- case 38:
+ case 1:
render_sprite_cells(context);
scan_sprite_table(line, context);
break;
- case 39:
+ case 2:
render_map_3(context);
scan_sprite_table(line, context);//Just a guess
break;
- case 40:
+ case 3:
render_map_output(line, 0, context);
scan_sprite_table(line, context);//Just a guess
//reverse context slot counter so it counts the number of sprite slots
@@ -1159,26 +1175,22 @@ void vdp_h32(uint32_t line, uint32_t linecyc, vdp_context * context)
context->sprite_draws = MAX_DRAWS_H32;
context->flags &= (~FLAG_CAN_MASK & ~FLAG_MASKED);
break;
- COLUMN_RENDER_BLOCK(2, 41)
- COLUMN_RENDER_BLOCK(4, 49)
- COLUMN_RENDER_BLOCK(6, 57)
- COLUMN_RENDER_BLOCK_REFRESH(8, 65)
- COLUMN_RENDER_BLOCK(10, 73)
- COLUMN_RENDER_BLOCK(12, 81)
- COLUMN_RENDER_BLOCK(14, 89)
- COLUMN_RENDER_BLOCK_REFRESH(16, 97)
- COLUMN_RENDER_BLOCK(18, 105)
- COLUMN_RENDER_BLOCK(20, 113)
- COLUMN_RENDER_BLOCK(22, 121)
- COLUMN_RENDER_BLOCK_REFRESH(24, 129)
- COLUMN_RENDER_BLOCK(26, 137)
- COLUMN_RENDER_BLOCK(28, 145)
- COLUMN_RENDER_BLOCK(30, 153)
- COLUMN_RENDER_BLOCK_REFRESH(32, 161)
- case 169:
- case 170:
- external_slot(context);
- break;
+ COLUMN_RENDER_BLOCK(2, 4)
+ COLUMN_RENDER_BLOCK(4, 12)
+ COLUMN_RENDER_BLOCK(6, 20)
+ COLUMN_RENDER_BLOCK_REFRESH(8, 28)
+ COLUMN_RENDER_BLOCK(10, 36)
+ COLUMN_RENDER_BLOCK(12, 44)
+ COLUMN_RENDER_BLOCK(14, 52)
+ COLUMN_RENDER_BLOCK_REFRESH(16, 60)
+ COLUMN_RENDER_BLOCK(18, 68)
+ COLUMN_RENDER_BLOCK(20, 76)
+ COLUMN_RENDER_BLOCK(22, 84)
+ COLUMN_RENDER_BLOCK_REFRESH(24, 92)
+ COLUMN_RENDER_BLOCK(26, 100)
+ COLUMN_RENDER_BLOCK(28, 108)
+ COLUMN_RENDER_BLOCK(30, 116)
+ COLUMN_RENDER_BLOCK_REFRESH(32, 124)
}
}
@@ -1203,6 +1215,14 @@ void vdp_h40_line(uint32_t line, vdp_context * context)
if (context->flags & FLAG_DMA_RUN) {
run_dma_src(context, 0);
}
+ external_slot(context);
+ if (context->flags & FLAG_DMA_RUN) {
+ run_dma_src(context, 0);
+ }
+ external_slot(context);
+ if (context->flags & FLAG_DMA_RUN) {
+ run_dma_src(context, 0);
+ }
for (int i = 0; i < 19; i++)
{
scan_sprite_table(line, context);
@@ -1240,13 +1260,17 @@ void vdp_h40_line(uint32_t line, vdp_context * context)
read_sprite_x(line, context);
}
- external_slot(context);
- if (context->flags & FLAG_DMA_RUN) {
- run_dma_src(context, 0);
- }
- external_slot(context);
+
return;
}
+ external_slot(context);
+ if (context->flags & FLAG_DMA_RUN) {
+ run_dma_src(context, 0);
+ }
+ external_slot(context);
+ if (context->flags & FLAG_DMA_RUN) {
+ run_dma_src(context, 0);
+ }
render_sprite_cells(context);
render_sprite_cells(context);
@@ -1356,73 +1380,81 @@ void vdp_h40_line(uint32_t line, vdp_context * context)
render_map_3(context);
render_map_output(line, column, context);
}
- external_slot(context);
- if (context->flags & FLAG_DMA_RUN) {
- run_dma_src(context, 0);
- }
- external_slot(context);
}
void latch_mode(vdp_context * context)
{
- context->latched_mode = (context->regs[REG_MODE_4] & 0x81) | (context->regs[REG_MODE_2] & BIT_PAL);
+ context->latched_mode = context->regs[REG_MODE_2] & BIT_PAL;
}
void check_render_bg(vdp_context * context, int32_t line, uint32_t slot)
{
- if (line > 0) {
- line -= 1;
- int starti = -1;
- if (context->latched_mode & BIT_H40) {
- if (slot >= 55 && slot < 210) {
- uint32_t x = (slot-55)*2;
- starti = line * 320 + x;
- } else if (slot < 5) {
- uint32_t x = (slot + 155)*2;
- starti = (line-1)*320 + x;
+ int starti = -1;
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ if (slot >= 12 && slot < 172) {
+ uint32_t x = (slot-12)*2;
+ starti = line * 320 + x;
+ }
+ } else {
+ if (slot >= 11 && slot < 139) {
+ uint32_t x = (slot-11)*2;
+ starti = line * 320 + x;
+ }
+ }
+ if (starti >= 0) {
+ if (context->b32) {
+ uint32_t color = context->colors[context->regs[REG_BG_COLOR]];
+ uint32_t * start = context->framebuf;
+ start += starti;
+ for (int i = 0; i < 2; i++) {
+ *(start++) = color;
}
} else {
- if (slot >= 48 && slot < 171) {
- uint32_t x = (slot-48)*2;
- starti = line * 320 + x;
- } else if (slot < 5) {
- uint32_t x = (slot + 123)*2;
- starti = (line-1)*320 + x;
- }
- }
- if (starti >= 0) {
- if (context->b32) {
- uint32_t color = context->colors[context->regs[REG_BG_COLOR]];
- uint32_t * start = context->framebuf;
- start += starti;
- for (int i = 0; i < 2; i++) {
- *(start++) = color;
- }
- } else {
- uint16_t color = context->colors[context->regs[REG_BG_COLOR]];
- uint16_t * start = context->framebuf;
- start += starti;
- for (int i = 0; i < 2; i++) {
- *(start++) = color;
- }
+ uint16_t color = context->colors[context->regs[REG_BG_COLOR]];
+ uint16_t * start = context->framebuf;
+ start += starti;
+ for (int i = 0; i < 2; i++) {
+ *(start++) = color;
}
}
}
}
+uint32_t const h40_hsync_cycles[] = {19, 20, 20, 20, 19, 20, 20, 20, 19, 20, 20, 20, 19, 20, 20, 20, 19};
+
void vdp_run_context(vdp_context * context, uint32_t target_cycles)
{
while(context->cycles < target_cycles)
{
context->flags &= ~FLAG_UNUSED_SLOT;
- uint32_t line = context->cycles / MCLKS_LINE;
- uint32_t active_lines = context->latched_mode & BIT_PAL ? PAL_ACTIVE : NTSC_ACTIVE;
- if (!context->cycles) {
+ uint32_t line = context->vcounter;
+ uint32_t inactive_start = context->latched_mode & BIT_PAL ? PAL_INACTIVE_START : NTSC_INACTIVE_START;
+ uint32_t slot = context->hslot;
+ //TODO: Figure out when this actually happens
+ if (!line && !slot) {
latch_mode(context);
}
- uint32_t linecyc = context->cycles % MCLKS_LINE;
- if (linecyc == 0) {
- if (line <= 1 || line >= active_lines) {
+
+ uint8_t is_h40 = context->regs[REG_MODE_4] & BIT_H40;
+ if (is_h40) {
+ if (slot == 167) {
+ context->cur_slot = MAX_DRAWS-1;
+ memset(context->linebuf, 0, LINEBUF_SIZE);
+ } else if (slot == 171) {
+ context->sprite_index = 0x80;
+ context->slot_counter = MAX_SPRITES_LINE;
+ }
+ } else {
+ if (slot == 134) {
+ context->cur_slot = MAX_DRAWS_H32-1;
+ memset(context->linebuf, 0, LINEBUF_SIZE);
+ } else if (slot == 138) {
+ context->sprite_index = 0x80;
+ context->slot_counter = MAX_SPRITES_LINE_H32;
+ }
+ }
+ if (is_h40 && slot == LINE_CHANGE_H40 || !is_h40 && slot == LINE_CHANGE_H32) {
+ if (line >= inactive_start) {
context->hint_counter = context->regs[REG_HINT];
} else if (context->hint_counter) {
context->hint_counter--;
@@ -1430,111 +1462,41 @@ void vdp_run_context(vdp_context * context, uint32_t target_cycles)
context->flags2 |= FLAG2_HINT_PENDING;
context->hint_counter = context->regs[REG_HINT];
}
- } else if(line == active_lines) {
- uint32_t intcyc = context->latched_mode & BIT_H40 ? VINT_CYCLE_H40 : VINT_CYCLE_H32;
- if (linecyc == intcyc) {
+ } else if(line == inactive_start) {
+ uint32_t intslot = context->regs[REG_MODE_4] & BIT_H40 ? VINT_SLOT_H40 : VINT_SLOT_H32;
+ if (slot == intslot) {
context->flags2 |= FLAG2_VINT_PENDING;
}
}
- uint32_t inccycles, slot;
- if (context->latched_mode & BIT_H40){
- if (linecyc < MCLKS_SLOT_H40*HSYNC_SLOT_H40) {
- slot = linecyc/MCLKS_SLOT_H40;
+ uint32_t inccycles;
+ //line 0x1FF is basically active even though it's not displayed
+ uint8_t active_slot = line < inactive_start || line == 0x1FF;
+ if (is_h40) {
+ if (slot < HSYNC_SLOT_H40 || slot >= HSYNC_END_H40) {
inccycles = MCLKS_SLOT_H40;
- } else if(linecyc < MCLK_WEIRD_END) {
- switch(linecyc-(MCLKS_SLOT_H40*HSYNC_SLOT_H40))
- {
- case 0:
- inccycles = 19;
- slot = 0;
- break;
- case 19:
- slot = 1;
- inccycles = 20;
- break;
- case 39:
- slot = 2;
- inccycles = 20;
- break;
- case 59:
- slot = 3;
- inccycles = 20;
- break;
- case 79:
- slot = 4;
- inccycles = 18;
- break;
- case 97:
- slot = 5;
- inccycles = 20;
- break;
- case 117:
- slot = 6;
- inccycles = 20;
- break;
- case 137:
- slot = 7;
- inccycles = 20;
- break;
- case 157:
- slot = 8;
- inccycles = 18;
- break;
- case 175:
- slot = 9;
- inccycles = 20;
- break;
- case 195:
- slot = 10;
- inccycles = 20;
- break;
- case 215:
- slot = 11;
- inccycles = 20;
- break;
- case 235:
- slot = 12;
- inccycles = 18;
- break;
- case 253:
- slot = 13;
- inccycles = 20;
- break;
- case 273:
- slot = 14;
- inccycles = 20;
- break;
- case 293:
- slot = 15;
- inccycles = 20;
- break;
- case 313:
- slot = 16;
- inccycles = 19;
- break;
- default:
- fprintf(stderr, "cycles after weirdness %d\n", linecyc-(MCLKS_SLOT_H40*HSYNC_SLOT_H40));
- exit(1);
- }
- slot += HSYNC_SLOT_H40;
} else {
- slot = (linecyc-MCLK_WEIRD_END)/MCLKS_SLOT_H40 + SLOT_WEIRD_END;
- inccycles = MCLKS_SLOT_H40;
+ inccycles = h40_hsync_cycles[slot-HSYNC_SLOT_H40];
+ }
+ //the first inactive line behaves as an active one for the first 4 slots
+ if (line == inactive_start && slot > 166 && slot < 171) {
+ active_slot = 1;
}
} else {
inccycles = MCLKS_SLOT_H32;
- slot = linecyc/MCLKS_SLOT_H32;
+ //the first inactive line behaves as an active one for the first 4 slots
+ if (line == inactive_start && slot > 166 && slot < 171) {
+ active_slot = 1;
+ }
}
- if ((line < active_lines || (line == active_lines && linecyc < (context->latched_mode & BIT_H40 ? 64 : 80))) && context->regs[REG_MODE_2] & DISPLAY_ENABLE) {
- //first sort-of active line is treated as 255 internally
- //it's used for gathering sprite info for line
- line = (line - 1) & 0xFF;
-
- //Convert to slot number
- if (context->latched_mode & BIT_H40){
- if (!slot && line != (active_lines-1) && (target_cycles - context->cycles) >= MCLKS_LINE) {
+ uint8_t inc_slot = 1;
+ if (context->regs[REG_MODE_2] & DISPLAY_ENABLE && active_slot) {
+ //run VDP rendering for a slot or a line
+ if (is_h40) {
+ if (slot == LINE_CHANGE_H40 && line < inactive_start && (target_cycles - context->cycles) >= MCLKS_LINE) {
vdp_h40_line(line, context);
inccycles = MCLKS_LINE;
+ context->vcounter++;
+ inc_slot = 0;
} else {
vdp_h40(line, slot, context);
}
@@ -1545,20 +1507,50 @@ void vdp_run_context(vdp_context * context, uint32_t target_cycles)
if (!is_refresh(context, slot)) {
external_slot(context);
}
- if (line < active_lines) {
+ if (line < inactive_start) {
check_render_bg(context, line, slot);
}
}
if (context->flags & FLAG_DMA_RUN && !is_refresh(context, slot)) {
run_dma_src(context, slot);
}
+ if (inc_slot) {
+ context->hslot++;
+ context->hslot &= 0xFF;
+ if (is_h40) {
+ if (context->hslot == LINE_CHANGE_H40) {
+ context->vcounter++;
+ } else if (context->hslot == 183) {
+ context->hslot = 229;
+ }
+ } else {
+ if (context->hslot == LINE_CHANGE_H32) {
+ context->vcounter++;
+ } else if (context->hslot == 148) {
+ context->hslot = 233;
+ }
+ }
+
+ }
+ context->vcounter &= 0x1FF;
+ if (context->flags2 & FLAG2_REGION_PAL) {
+ if (context->latched_mode & BIT_PAL) {
+ if (context->vcounter == 0x10B) {
+ context->vcounter = 0x1D2;
+ }
+ } else if (context->vcounter == 0x103){
+ context->vcounter = 0x1CA;
+ }
+ } else if (!(context->latched_mode & BIT_PAL) && context->vcounter == 0xEB) {
+ context->vcounter = 0x1E5;
+ }
context->cycles += inccycles;
}
}
uint32_t vdp_run_to_vblank(vdp_context * context)
{
- uint32_t target_cycles = ((context->latched_mode & BIT_PAL) ? PAL_ACTIVE : NTSC_ACTIVE) * MCLKS_LINE;
+ uint32_t target_cycles = ((context->latched_mode & BIT_PAL) ? PAL_INACTIVE_START : NTSC_INACTIVE_START) * MCLKS_LINE;
vdp_run_context(context, target_cycles);
return context->cycles;
}
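
The new hslot/vcounter bookkeeping above advances a 9-bit vertical counter and applies a region- and mode-dependent jump during blanking. A compact C restatement of just that progression; next_vcounter is an illustrative name, and region_pal/mode_pal stand in for FLAG2_REGION_PAL and latched_mode & BIT_PAL respectively:

#include <stdint.h>

static uint16_t next_vcounter(uint16_t vcounter, uint8_t region_pal, uint8_t mode_pal)
{
	vcounter = (vcounter + 1) & 0x1FF;
	if (region_pal) {
		if (mode_pal) {
			if (vcounter == 0x10B) {
				vcounter = 0x1D2;  //PAL console with BIT_PAL latched
			}
		} else if (vcounter == 0x103) {
			vcounter = 0x1CA;          //PAL console without BIT_PAL
		}
	} else if (!mode_pal && vcounter == 0xEB) {
		vcounter = 0x1E5;              //NTSC console
	}
	return vcounter;
}
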
@@ -1570,7 +1562,7 @@ void vdp_run_dma_done(vdp_context * context, uint32_t target_cycles)
if (!dmalen) {
dmalen = 0x10000;
}
- uint32_t min_dma_complete = dmalen * (context->latched_mode & BIT_H40 ? 16 : 20);
+ uint32_t min_dma_complete = dmalen * (context->regs[REG_MODE_4] & BIT_H40 ? 16 : 20);
if ((context->regs[REG_DMASRC_H] & 0xC0) == 0xC0 || (context->cd & 0xF) == VRAM_WRITE) {
//DMA copies take twice as long to complete since they require a read and a write
//DMA Fills and transfers to VRAM also take twice as long as it requires 2 writes for a single word
@@ -1635,7 +1627,7 @@ int vdp_control_port_write(vdp_context * context, uint16_t value)
if (!context->double_res) {
context->framebuf = context->oddbuf;
}
- }
+ }
context->cd &= 0x3C;
}
} else {
@@ -1661,10 +1653,10 @@ int vdp_data_port_write(vdp_context * context, uint16_t value)
context->flags &= ~FLAG_DMA_RUN;
}
while (context->fifo_write == context->fifo_read) {
- vdp_run_context(context, context->cycles + ((context->latched_mode & BIT_H40) ? 16 : 20));
+ vdp_run_context(context, context->cycles + ((context->regs[REG_MODE_4] & BIT_H40) ? 16 : 20));
}
fifo_entry * cur = context->fifo + context->fifo_write;
- cur->cycle = context->cycles + ((context->latched_mode & BIT_H40) ? 16 : 20)*FIFO_LATENCY;
+ cur->cycle = context->cycles + ((context->regs[REG_MODE_4] & BIT_H40) ? 16 : 20)*FIFO_LATENCY;
cur->address = context->address;
cur->value = value;
if (context->cd & 0x20 && (context->regs[REG_DMASRC_H] & 0xC0) == 0x80) {
@@ -1709,13 +1701,25 @@ uint16_t vdp_control_port_read(vdp_context * context)
if ((context->regs[REG_MODE_4] & BIT_INTERLACE) && context->framebuf == context->oddbuf) {
value |= 0x10;
}
- uint32_t line= context->cycles / MCLKS_LINE;
- uint32_t linecyc = context->cycles % MCLKS_LINE;
- if (line >= (context->latched_mode & BIT_PAL ? PAL_ACTIVE : NTSC_ACTIVE) || !(context->regs[REG_MODE_2] & BIT_DISP_EN)) {
+ uint32_t line= context->vcounter;
+ uint32_t slot = context->hslot;
+ if (
+ (
+ line >= (context->latched_mode & BIT_PAL ? PAL_INACTIVE_START : NTSC_INACTIVE_START)
+ && line < 0x1FF
+ )
+ || !(context->regs[REG_MODE_2] & BIT_DISP_EN)
+ ) {
value |= 0x8;
}
- if (linecyc < (context->latched_mode & BIT_H40 ? HBLANK_CLEAR_H40 : HBLANK_CLEAR_H32)) {
- value |= 0x4;
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ if (slot < HBLANK_END_H40 || slot > HBLANK_START_H40) {
+ value |= 0x4;
+ }
+ } else {
+ if (slot < HBLANK_END_H32 || slot > HBLANK_START_H32) {
+ value |= 0x4;
+ }
}
if (context->flags & FLAG_DMA_RUN) {
value |= 0x2;
@@ -1741,7 +1745,7 @@ uint16_t vdp_data_port_read(vdp_context * context)
context->flags &= ~FLAG_UNUSED_SLOT;
//context->flags2 |= FLAG2_READ_PENDING;
while (!(context->flags & FLAG_UNUSED_SLOT)) {
- vdp_run_context(context, context->cycles + ((context->latched_mode & BIT_H40) ? 16 : 20));
+ vdp_run_context(context, context->cycles + ((context->regs[REG_MODE_4] & BIT_H40) ? 16 : 20));
}
uint16_t value = 0;
switch (context->cd & 0xF)
@@ -1751,7 +1755,7 @@ uint16_t vdp_data_port_read(vdp_context * context)
context->flags &= ~FLAG_UNUSED_SLOT;
context->flags2 |= FLAG2_READ_PENDING;
while (!(context->flags & FLAG_UNUSED_SLOT)) {
- vdp_run_context(context, context->cycles + ((context->latched_mode & BIT_H40) ? 16 : 20));
+ vdp_run_context(context, context->cycles + ((context->regs[REG_MODE_4] & BIT_H40) ? 16 : 20));
}
value |= context->vdpmem[context->address | 1];
break;
@@ -1782,102 +1786,8 @@ uint16_t vdp_hv_counter_read(vdp_context * context)
if (context->regs[REG_MODE_1] & BIT_HVC_LATCH) {
return context->hv_latch;
}
- uint32_t line= context->cycles / MCLKS_LINE;
- if (!line) {
- line = 0xFF;
- } else {
- line--;
- if (line > 0xEA) {
- line = (line + 0xFA) & 0xFF;
- }
- }
- uint32_t linecyc = context->cycles % MCLKS_LINE;
- if (context->latched_mode & BIT_H40) {
- uint32_t slot;
- if (linecyc < MCLKS_SLOT_H40*HSYNC_SLOT_H40) {
- slot = linecyc/MCLKS_SLOT_H40;
- } else if(linecyc < MCLK_WEIRD_END) {
- switch(linecyc-(MCLKS_SLOT_H40*HSYNC_SLOT_H40))
- {
- case 0:
- slot = 0;
- break;
- case 19:
- slot = 1;
- break;
- case 39:
- slot = 2;
- break;
- case 59:
- slot = 2;
- break;
- case 79:
- slot = 3;
- break;
- case 97:
- slot = 4;
- break;
- case 117:
- slot = 5;
- break;
- case 137:
- slot = 6;
- break;
- case 157:
- slot = 7;
- break;
- case 175:
- slot = 8;
- break;
- case 195:
- slot = 9;
- break;
- case 215:
- slot = 11;
- break;
- case 235:
- slot = 12;
- break;
- case 253:
- slot = 13;
- break;
- case 273:
- slot = 14;
- break;
- case 293:
- slot = 15;
- break;
- case 313:
- slot = 16;
- break;
- default:
- fprintf(stderr, "cycles after weirdness %d\n", linecyc-(MCLKS_SLOT_H40*HSYNC_SLOT_H40));
- exit(1);
- }
- slot += HSYNC_SLOT_H40;
- } else {
- slot = (linecyc-MCLK_WEIRD_END)/MCLKS_SLOT_H40 + SLOT_WEIRD_END;
- }
- linecyc = slot * 2;
- if (linecyc >= 86) {
- linecyc -= 86;
- } else {
- linecyc += 334;
- }
- if (linecyc > 0x16C) {
- linecyc += 92;
- }
- } else {
- linecyc /= 10;
- if (linecyc >= 74) {
- linecyc -= 74;
- } else {
- linecyc += 268;
- }
- if (linecyc > 0x127) {
- linecyc += 170;
- }
- }
+ uint32_t line= context->vcounter & 0xFF;
+ uint32_t linecyc = context->hslot;
linecyc &= 0xFF;
if (context->double_res) {
line <<= 1;
@@ -1910,6 +1820,108 @@ void vdp_adjust_cycles(vdp_context * context, uint32_t deduction)
}
}
+uint32_t vdp_cycles_next_line(vdp_context * context)
+{
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ if (context->hslot < LINE_CHANGE_H40) {
+ return (HBLANK_START_H40 - context->hslot) * MCLKS_SLOT_H40;
+ } else if (context->hslot < 183) {
+ return MCLKS_LINE - (context->hslot - LINE_CHANGE_H40) * MCLKS_SLOT_H40;
+ } else if (context->hslot < HSYNC_END_H40){
+ uint32_t before_hsync = context->hslot < HSYNC_SLOT_H40 ? (HSYNC_SLOT_H40 - context->hslot) * MCLKS_SLOT_H40 : 0;
+ uint32_t hsync = 0;
+ for (int i = context->hslot <= HSYNC_SLOT_H40 ? 0 : context->hslot - HSYNC_SLOT_H40; i < sizeof(h40_hsync_cycles)/sizeof(uint32_t); i++)
+ {
+ hsync += h40_hsync_cycles[i];
+ }
+ uint32_t after_hsync = (256- HSYNC_END_H40 + LINE_CHANGE_H40) * MCLKS_SLOT_H40;
+ return before_hsync + hsync + after_hsync;
+ } else {
+ return (256-context->hslot + LINE_CHANGE_H40) * MCLKS_SLOT_H40;
+ }
+ } else {
+ if (context->hslot < LINE_CHANGE_H32) {
+ return (LINE_CHANGE_H32 - context->hslot) * MCLKS_SLOT_H32;
+ } else if (context->hslot < 148) {
+ return MCLKS_LINE - (context->hslot - LINE_CHANGE_H32) * MCLKS_SLOT_H32;
+ } else {
+ return (256-context->hslot + LINE_CHANGE_H32) * MCLKS_SLOT_H32;
+ }
+ }
+}
+
+uint32_t vdp_cycles_to_line(vdp_context * context, uint32_t target)
+{
+ uint32_t jump_start, jump_dst;
+ if (context->flags2 & FLAG2_REGION_PAL) {
+ if (context->latched_mode & BIT_PAL) {
+ jump_start = 0x10B;
+ jump_dst = 0x1D2;
+ } else {
+ jump_start = 0x103;
+ jump_dst = 0x1CA;
+ }
+ } else {
+ if (context->latched_mode & BIT_PAL) {
+ jump_start = 0;
+ jump_dst = 0;
+ } else {
+ jump_start = 0xEB;
+ jump_dst = 0x1E5;
+ }
+ }
+ uint32_t lines;
+ if (context->vcounter < target) {
+ if (target < jump_start) {
+ lines = target - context->vcounter;
+ } else {
+ lines = jump_start - context->vcounter + target - jump_dst;
+ }
+ } else {
+ if (context->vcounter < jump_start) {
+ lines = jump_start - context->vcounter + 512 - jump_dst;
+ } else {
+ lines = 512 - context->vcounter;
+ }
+ if (target < jump_start) {
+ lines += target;
+ } else {
+ lines += jump_start + target - jump_dst;
+ }
+ }
+ return MCLKS_LINE * (lines - 1) + vdp_cycles_next_line(context);
+}
+
+uint32_t vdp_frame_end_line(vdp_context * context)
+{
+ uint32_t frame_end;
+ if (context->flags2 & FLAG2_REGION_PAL) {
+ if (context->latched_mode & BIT_PAL) {
+ frame_end = PAL_INACTIVE_START + 8;
+ } else {
+ frame_end = NTSC_INACTIVE_START + 8;
+ }
+ } else {
+ if (context->latched_mode & BIT_PAL) {
+ frame_end = 512;
+ } else {
+ frame_end = NTSC_INACTIVE_START + 8;
+ }
+ }
+ return frame_end;
+}
+
+uint32_t vdp_cycles_to_frame_end(vdp_context * context)
+{
+ return context->cycles + vdp_cycles_to_line(context, vdp_frame_end_line(context));
+}
+
+uint8_t vdp_is_frame_over(vdp_context * context)
+{
+ uint32_t frame_end = vdp_frame_end_line(context);
+ return context->vcounter >= frame_end && context->vcounter < (frame_end + 8);
+}
+
uint32_t vdp_next_hint(vdp_context * context)
{
if (!(context->regs[REG_MODE_1] & BIT_HINT_EN)) {
@@ -1918,17 +1930,15 @@ uint32_t vdp_next_hint(vdp_context * context)
if (context->flags2 & FLAG2_HINT_PENDING) {
return context->cycles;
}
- uint32_t active_lines = context->latched_mode & BIT_PAL ? PAL_ACTIVE : NTSC_ACTIVE;
- uint32_t line = context->cycles / MCLKS_LINE;
- if (line >= active_lines) {
- return 0xFFFFFFFF;
- }
- uint32_t linecyc = context->cycles % MCLKS_LINE;
- uint32_t hcycle = context->cycles + context->hint_counter * MCLKS_LINE + MCLKS_LINE - linecyc;
- if (!line) {
- hcycle += MCLKS_LINE;
+ uint32_t inactive_start = context->latched_mode & BIT_PAL ? PAL_INACTIVE_START : NTSC_INACTIVE_START;
+ uint32_t hint_line;
+ if (context->vcounter >= inactive_start) {
+ hint_line = context->regs[REG_HINT];
+ } else {
+ hint_line = context->vcounter + context->hint_counter + 1;
}
- return hcycle;
+
+ return context->cycles + vdp_cycles_to_line(context, hint_line);
}
uint32_t vdp_next_vint(vdp_context * context)
@@ -1939,29 +1949,44 @@ uint32_t vdp_next_vint(vdp_context * context)
if (context->flags2 & FLAG2_VINT_PENDING) {
return context->cycles;
}
- uint32_t active_lines = context->latched_mode & BIT_PAL ? PAL_ACTIVE : NTSC_ACTIVE;
- uint32_t vcycle = MCLKS_LINE * active_lines;
- if (context->latched_mode & BIT_H40) {
- vcycle += VINT_CYCLE_H40;
- } else {
- vcycle += VINT_CYCLE_H32;
- }
- if (vcycle < context->cycles) {
- return 0xFFFFFFFF;
- }
- return vcycle;
+
+
+ return vdp_next_vint_z80(context);
}
uint32_t vdp_next_vint_z80(vdp_context * context)
{
- uint32_t active_lines = context->latched_mode & BIT_PAL ? PAL_ACTIVE : NTSC_ACTIVE;
- uint32_t vcycle = MCLKS_LINE * active_lines;
- if (context->latched_mode & BIT_H40) {
- vcycle += VINT_CYCLE_H40;
+ uint32_t inactive_start = context->latched_mode & BIT_PAL ? PAL_INACTIVE_START : NTSC_INACTIVE_START;
+ if (context->vcounter == inactive_start) {
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ if (context->hslot >= HBLANK_START_H40) {
+ if (context->hslot < 183) {
+ return context->cycles + (VINT_SLOT_H40 + 183 - context->hslot + 256 - 229) * MCLKS_SLOT_H40;
+ } else {
+ return context->cycles + (VINT_SLOT_H40 + 256 - context->hslot) * MCLKS_SLOT_H40;
+ }
+ } else if (context->hslot < VINT_SLOT_H40) {
+ return context->cycles + (VINT_SLOT_H40 - context->hslot) * MCLKS_SLOT_H40;
+ }
+ } else {
+ if (context->hslot >= HBLANK_START_H32) {
+ if (context->hslot < 148) {
+ return context->cycles + (VINT_SLOT_H32 + 148 - context->hslot + 256 - 233) * MCLKS_SLOT_H32;
+ } else {
+ return context->cycles + (VINT_SLOT_H32 + 256 - context->hslot) * MCLKS_SLOT_H32;
+ }
+ } else if (context->hslot < VINT_SLOT_H32) {
+ return context->cycles + (VINT_SLOT_H32 - context->hslot) * MCLKS_SLOT_H32;
+ }
+ }
+ }
+ int32_t cycles_to_vint = vdp_cycles_to_line(context, inactive_start);
+ if (context->regs[REG_MODE_4] & BIT_H40) {
+ cycles_to_vint += (VINT_SLOT_H40 + 183 - HBLANK_START_H40 + 256 - 229) * MCLKS_SLOT_H40;
} else {
- vcycle += VINT_CYCLE_H32;
+ cycles_to_vint += (VINT_SLOT_H32 + 148 - HBLANK_START_H32 + 256 - 233) * MCLKS_SLOT_H32;
}
- return vcycle;
+ return context->cycles + cycles_to_vint;
}
void vdp_int_ack(vdp_context * context, uint16_t int_num)
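For reference, the new vdp_cycles_to_line above counts whole lines from the current vcounter to a target line across the region-dependent jump in the counter, then adds the partial line from vdp_cycles_next_line. Below is a minimal sketch of the same counting written iteratively for clarity; it assumes the NTSC/V28 jump values (0xEB -> 0x1E5) used in the patch, and lines_until is a hypothetical name, not part of the code.

/* Sketch only: iterative equivalent of the line-counting half of
 * vdp_cycles_to_line for an NTSC/V28 frame. The v-counter increments,
 * wraps at 0x1FF and jumps from 0xEB straight to 0x1E5, so 0xEB itself
 * is never observed. */
#include <stdint.h>

static uint32_t lines_until(uint16_t vcounter, uint16_t target)
{
	uint32_t lines = 0;
	while (vcounter != target) {
		vcounter = (vcounter + 1) & 0x1FF;
		if (vcounter == 0xEB) {
			vcounter = 0x1E5;
		}
		lines++;
	}
	return lines;
}

/* vdp_cycles_to_line then converts this count to cycles as
 * MCLKS_LINE * (lines - 1) + vdp_cycles_next_line(context). */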
diff --git a/vdp.h b/vdp.h
index 830aa5a..70c0953 100644
--- a/vdp.h
+++ b/vdp.h
@@ -49,6 +49,7 @@
#define FLAG2_HINT_PENDING 0x02
#define FLAG2_READ_PENDING 0x04
#define FLAG2_SPRITE_COLLIDE 0x08
+#define FLAG2_REGION_PAL 0x10
#define DISPLAY_ENABLE 0x40
@@ -142,9 +143,11 @@ typedef struct {
uint32_t colors[CRAM_SIZE*3];
uint32_t debugcolors[1 << (3 + 1 + 1 + 1)];//3 bits for source, 1 bit for priority, 1 bit for shadow, 1 bit for hilight
uint16_t vsram[VSRAM_SIZE];
- uint8_t latched_mode;
+ uint16_t vcounter;
+ uint16_t hslot; //hcounter/2
uint16_t hscroll_a;
uint16_t hscroll_b;
+ uint8_t latched_mode;
uint8_t sprite_index;
uint8_t sprite_draws;
int8_t slot_counter;
@@ -167,7 +170,7 @@ typedef struct {
uint8_t *tmp_buf_b;
} vdp_context;
-void init_vdp_context(vdp_context * context);
+void init_vdp_context(vdp_context * context, uint8_t region_pal);
void vdp_run_context(vdp_context * context, uint32_t target_cycles);
//runs from current cycle count to VBLANK for the current mode, returns ending cycle count
uint32_t vdp_run_to_vblank(vdp_context * context);
@@ -190,6 +193,8 @@ void vdp_int_ack(vdp_context * context, uint16_t int_num);
void vdp_print_sprite_table(vdp_context * context);
void vdp_print_reg_explain(vdp_context * context);
void latch_mode(vdp_context * context);
+uint32_t vdp_cycles_to_frame_end(vdp_context * context);
+uint8_t vdp_is_frame_over(vdp_context * context);
extern int32_t color_map[1 << 12];
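With the context now tracking vcounter and hslot directly and init_vdp_context taking the console region, the frame-end helpers can be driven without any cycle-to-line division. A minimal sketch of a caller, assuming an already initialized context; run_to_frame_end is a hypothetical wrapper, not part of the patch.

/* Sketch only: vdp_cycles_to_frame_end already returns an absolute cycle
 * target (it includes context->cycles), so it can be passed straight to
 * vdp_run_context. */
#include "vdp.h"

static void run_to_frame_end(vdp_context *context)
{
	vdp_run_context(context, vdp_cycles_to_frame_end(context));
	/* vdp_is_frame_over(context) should now report true until the
	   v-counter moves past the 8-line window after the frame-end line */
}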
diff --git a/vos_prog_info.c b/vos_prog_info.c
new file mode 100644
index 0000000..1fb4895
--- /dev/null
+++ b/vos_prog_info.c
@@ -0,0 +1,100 @@
+#include <stdio.h>
+#include "vos_program_module.h"
+
+int main(int argc, char ** argv)
+{
+ vos_program_module header;
+ FILE * f = fopen(argv[1], "rb");
+ vos_read_header(f, &header);
+ vos_read_alloc_module_map(f, &header);
+ vos_read_alloc_external_vars(f, &header);
+
+ printf("Version: %d\n", header.version);
+ printf("Binder Version: %s\n", header.binder_version.str);
+ printf("Binder Options: %s\n", header.binder_options.str);
+ printf("System name: %s\n", header.system_name.str);
+ printf("User name: %s\n", header.user_name.str);
+ printf("Date bound: %d\n", header.date_bound);
+	printf("Code address: 0x%X, Static address: 0x%X\n",
+ header.main_entry_link.code_address, header.main_entry_link.static_address);
+ printf("User boundary: 0x%X\n", header.user_boundary);
+ printf("Num modules: %d\n", header.n_modules);
+ printf("Num extern vars: %d\n", header.n_external_vars);
+ printf("Num link names: %d\n", header.n_link_names);
+	printf("Num unsnapped links: %d\n", header.n_unsnapped_links);
+ printf("Num VM pages: %d\n", header.n_vm_pages);
+ printf("Num header pages: %d\n", header.n_header_pages);
+ for (int i = 0; i < 3; i++) {
+ for (int j = 0; j < 4; j++) {
+ printf("Info %d:%d\n\tAddress: 0x%X\n\tLength: 0x%X\n",
+ i, j, header.info[i][j].address, header.info[i][j].len);
+ }
+ }
+ printf("Module map address: 0x%X\n", header.module_map_address);
+ printf("Module map length: 0x%X\n", header.module_map_len);
+ printf("External vars map address: 0x%X\n", header.external_vars_map_address);
+ printf("External vars map length: 0x%X\n", header.external_vars_map_len);
+ printf("Link names map address: 0x%X\n", header.link_names_map_address);
+ printf("Link names map length: 0x%X\n", header.link_names_map_len);
+ printf("Header address: 0x%X\n", header.header_address);
+ printf("Header length: 0x%X\n", header.header_len);
+ //printf("Access Info: 0x%X\n", header.header_address);
+ printf("Flags: 0x%X\n", header.flags);
+ printf("Num tasks: %d\n", header.n_tasks);
+ printf("Stack Size: 0x%X\n", header.stack_len);
+ printf("Num entries: %d\n", header.n_entries);
+ printf("Entry map address: 0x%X\n", header.entry_map_address);
+ printf("Entry map length: 0x%X\n", header.entry_map_len);
+ printf("Pop Version: %d\n", header.pop_version);
+ printf("Processor: %d\n", header.processor);
+ printf("Processor family: %d\n", header.processor_family);
+ printf("Release name: %s\n", header.release_name.str);
+	printf("Relocation info:\n\tMap Address: 0x%X\n\tMap Length: 0x%X\n\tNum Relocations: %d\n",
+ header.relocation_info.map_address, header.relocation_info.map_len,
+ header.relocation_info.n_relocations);
+ printf("High water mark: 0x%X\n", header.high_water_mark);
+	printf("Copyright notice: %s\n", header.copyright_notice.str);
+ printf("String pool address: 0x%X\n", header.string_pool_address);
+ printf("String pool length: 0x%X\n", header.string_pool_len);
+ printf("Object dir map address: 0x%X\n", header.obj_dir_map_address);
+ printf("Object dir map length: 0x%X\n", header.obj_dir_map_len);
+ puts("Global offset table addresses:");
+ for (int i = 0; i < 3; i++) {
+ printf("\t%d: 0x%X\n", i, header.global_offset_table_address[i]);
+ }
+ for (int i = 0; i < 3; i++) {
+ printf("Block map info %d\n\tAddress: 0x%X\n\tLength: 0x%X\n",
+ i, header.block_map_info[i].address, header.block_map_info[i].len);
+ }
+	printf("Section map file address: 0x%X\n", header.section_map_file_address);
+	printf("Section map address: 0x%X\n", header.section_map_address);
+	printf("Section map length: 0x%X\n", header.section_map_len);
+ printf("Num sections: %d\n", header.n_sections);
+ printf("Max heap size: 0x%X\n", header.max_heap_size);
+ printf("Max program size: 0x%X\n", header.max_program_size);
+ printf("Max stack size: 0x%X\n", header.max_stack_size);
+ printf("Stack fence size: 0x%X\n", header.stack_fence_size);
+
+ puts("\nModules");
+ for (int i = 0; i < header.n_modules; i++) {
+ printf("\t%s:\n\t\tCode Address: 0x%X, Length: 0x%X\n",
+ header.module_map_entries[i].name.str,
+ header.module_map_entries[i].code_address,
+ header.module_map_entries[i].code_length);
+ printf("\t\tFoo Address: 0x%X, Length: 0x%X\n",
+ header.module_map_entries[i].foo_address,
+ header.module_map_entries[i].foo_length);
+ printf("\t\tBar Address: 0x%X, Length: 0x%X\n",
+ header.module_map_entries[i].bar_address,
+ header.module_map_entries[i].bar_length);
+ }
+
+	puts("\nExternal Vars");
+ for (int i = 0; i < header.n_external_vars; i++) {
+ printf("\t%s: 0x%X\n",
+ header.external_vars[i].name.str, header.external_vars[i].address);
+ }
+
+ vos_header_cleanup(&header);
+ return 0;
+}
diff --git a/vos_program_module.c b/vos_program_module.c
new file mode 100644
index 0000000..7019623
--- /dev/null
+++ b/vos_program_module.c
@@ -0,0 +1,208 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include "vos_program_module.h"
+
+static uint16_t big16(uint8_t ** src)
+{
+ uint16_t ret = *((*src)++) << 8;
+ ret |= *((*src)++);
+ return ret;
+}
+
+static uint32_t big32(uint8_t ** src)
+{
+ uint32_t ret = *((*src)++) << 24;
+ ret |= *((*src)++) << 16;
+ ret |= *((*src)++) << 8;
+ ret |= *((*src)++);
+ return ret;
+}
+
+static void string_(uint8_t ** src, uint16_t *len, char * str, uint32_t storage)
+{
+ *len = big16(src);
+ memcpy(str, *src, storage);
+ *src += storage;
+ if (*len >= storage)
+ {
+ *len = storage;
+ } else {
+ str[*len] = 0;
+ }
+ if (storage & 1)
+ {
+ (*src)++;
+ }
+}
+
+#define string(src, field) string_(src, &(field).len, (field).str, sizeof((field).str))
+
+
+int vos_read_header(FILE * f, vos_program_module *out)
+{
+ uint8_t buffer[4096];
+ if (fread(buffer, 1, sizeof(buffer), f) != sizeof(buffer))
+ {
+ return 0;
+ }
+ uint8_t *cur = buffer;
+ out->version = big16(&cur);
+ string(&cur, out->binder_version);
+ string(&cur, out->binder_options);
+ string(&cur, out->system_name);
+ string(&cur, out->user_name);
+ out->date_bound = big32(&cur);
+ out->main_entry_link.code_address = big32(&cur);
+ out->main_entry_link.static_address = big32(&cur);
+ out->user_boundary = big32(&cur);
+ out->n_modules = big16(&cur);
+ out->n_external_vars = big16(&cur);
+ out->n_link_names = big16(&cur);
+ out->n_unsnapped_links = big16(&cur);
+ out->n_vm_pages = big16(&cur);
+ out->n_header_pages = big16(&cur);
+ for (int i = 0; i < 3; i++)
+ {
+ for (int j = 0; j < 4; j++)
+ {
+ out->info[i][j].address = big32(&cur);
+ out->info[i][j].len = big32(&cur);
+ }
+ }
+ out->module_map_address = big32(&cur);
+ out->module_map_len = big32(&cur);
+ out->external_vars_map_address = big32(&cur);
+ out->external_vars_map_len = big32(&cur);
+ out->link_names_map_address = big32(&cur);
+ out->link_names_map_len = big32(&cur);
+ out->link_map_address = big32(&cur);
+ out->link_map_len = big32(&cur);
+ out->header_address = big32(&cur);
+ out->header_len = big32(&cur);
+ memcpy(out->access_info, cur, sizeof(out->access_info));
+ cur += sizeof(out->access_info);
+ out->flags = big32(&cur);
+ out->n_tasks = big16(&cur);
+ for (int i = 0; i < 3; i++)
+ {
+ out->task_static_len[i] = big32(&cur);
+ }
+ out->stack_len = big32(&cur);
+ out->n_entries = big16(&cur);
+ out->entry_map_address = big32(&cur);
+ out->entry_map_len = big32(&cur);
+ out->pop_version = big16(&cur);
+ out->processor = big16(&cur);
+ string(&cur, out->release_name);
+ out->relocation_info.map_address = big32(&cur);
+ out->relocation_info.map_len = big32(&cur);
+ out->relocation_info.n_relocations = big32(&cur);
+ out->high_water_mark = big32(&cur);
+ string(&cur, out->copyright_notice);
+ for (int i = 0; i < 14; i++)
+ {
+ out->module_origins[i] = big32(&cur);
+ }
+ out->processor_family = big16(&cur);
+ string(&cur, out->program_name);
+ out->string_pool_address = big32(&cur);
+ out->string_pool_len = big32(&cur);
+ out->obj_dir_map_address = big32(&cur);
+ out->obj_dir_map_len = big32(&cur);
+ for (int i = 0; i < 3; i++)
+ {
+ out->global_offset_table_address[i] = big32(&cur);
+ }
+ for (int i = 0; i < 3; i++)
+ {
+ out->block_map_info[i].address = big32(&cur);
+ out->block_map_info[i].len = big32(&cur);
+ }
+ out->section_map_file_address = big32(&cur);
+ out->section_map_address = big32(&cur);
+ out->section_map_len = big32(&cur);
+ out->n_sections = big16(&cur);
+ out->max_heap_size = big32(&cur);
+ out->max_program_size = big32(&cur);
+ out->max_stack_size = big32(&cur);
+ out->stack_fence_size = big32(&cur);
+
+ out->module_map_entries = NULL;
+ out->external_vars = NULL;
+ return 1;
+}
+
+#define MODULE_MAP_ENTRY_SIZE 74
+
+int vos_read_alloc_module_map(FILE * f, vos_program_module *header)
+{
+ if (header->module_map_len != header->n_modules * MODULE_MAP_ENTRY_SIZE)
+ {
+ return 0;
+ }
+ uint8_t * buf = malloc(header->module_map_len);
+ fseek(f, header->module_map_address + 0x1000 - header->user_boundary, SEEK_SET);
+ if (fread(buf, 1, header->module_map_len, f) != header->module_map_len)
+ {
+ free(buf);
+ return 0;
+ }
+ uint8_t * cur = buf;
+ header->module_map_entries = malloc(sizeof(vos_module_map_entry) * header->n_modules);
+ for (int i = 0; i < header->n_modules; i++)
+ {
+ string(&cur, header->module_map_entries[i].name);
+ for (int j = 0; j < 5; j++)
+ {
+ header->module_map_entries[i].unknown[j] = big16(&cur);
+ }
+ header->module_map_entries[i].code_address = big32(&cur);
+ header->module_map_entries[i].code_length = big32(&cur);
+ header->module_map_entries[i].foo_address = big32(&cur);
+ header->module_map_entries[i].foo_length = big32(&cur);
+ header->module_map_entries[i].bar_address = big32(&cur);
+ header->module_map_entries[i].bar_length = big32(&cur);
+ for (int j = 0; j < 3; j++)
+ {
+ header->module_map_entries[i].unknown2[j] = big16(&cur);
+ }
+ }
+ return 1;
+}
+
+#define EXTERNAL_VAR_ENTRY_SIZE 44
+
+int vos_read_alloc_external_vars(FILE * f, vos_program_module *header)
+{
+ if (header->external_vars_map_len != header->n_external_vars * EXTERNAL_VAR_ENTRY_SIZE)
+ {
+ return 0;
+ }
+ uint8_t * buf = malloc(header->external_vars_map_len);
+ fseek(f, header->external_vars_map_address + 0x1000 - header->user_boundary, SEEK_SET);
+ if (fread(buf, 1, header->external_vars_map_len, f) != header->external_vars_map_len)
+ {
+ free(buf);
+ return 0;
+ }
+ uint8_t * cur = buf;
+ header->external_vars = malloc(sizeof(vos_external_var_entry) * header->n_external_vars);
+ for (int i = 0; i < header->n_external_vars; i++)
+ {
+ string(&cur, header->external_vars[i].name);
+ header->external_vars[i].address = big32(&cur);
+ for (int j = 0; j < 3; j++)
+ {
+ header->external_vars[i].unknown[j] = big16(&cur);
+ }
+ }
+ return 1;
+}
+
+void vos_header_cleanup(vos_program_module *header)
+{
+ free(header->module_map_entries);
+ free(header->external_vars);
+}
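The readers above follow one convention: every multi-byte field is big-endian, and each helper consumes bytes by advancing the shared source pointer, so structures can be parsed field-by-field in declaration order (the maps themselves are located in the file at map_address + 0x1000 - user_boundary). A standalone sketch of that convention; read_big32 is a hypothetical name mirroring big32 above.

/* Sketch only: big-endian read that advances the caller's cursor,
 * matching the behavior of big16/big32 in vos_program_module.c. */
#include <stdint.h>
#include <assert.h>

static uint32_t read_big32(uint8_t **src)
{
	uint32_t ret = (uint32_t)(*src)[0] << 24 | (uint32_t)(*src)[1] << 16
	             | (uint32_t)(*src)[2] << 8  | (uint32_t)(*src)[3];
	*src += 4;
	return ret;
}

int main(void)
{
	uint8_t buf[] = {0x12, 0x34, 0x56, 0x78};
	uint8_t *cur = buf;
	assert(read_big32(&cur) == 0x12345678);
	assert(cur == buf + 4);
	return 0;
}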
diff --git a/vos_program_module.h b/vos_program_module.h
new file mode 100644
index 0000000..7febb05
--- /dev/null
+++ b/vos_program_module.h
@@ -0,0 +1,134 @@
+#ifndef VOS_PROGRAM_MODULE_H_
+#define VOS_PROGRAM_MODULE_H_
+
+#include <stdint.h>
+
+typedef struct
+{
+ struct {
+ uint16_t len;
+ char str[32];
+ } name;
+ uint16_t unknown[5];
+ uint32_t code_address;
+ uint32_t code_length;
+ uint32_t foo_address;
+ uint32_t foo_length;
+ uint32_t bar_address;
+ uint32_t bar_length;
+ uint16_t unknown2[3];
+} vos_module_map_entry;
+
+typedef struct
+{
+ struct {
+ uint16_t len;
+ char str[32];
+ } name;
+ uint32_t address;
+ uint16_t unknown[3];
+} vos_external_var_entry;
+
+typedef struct
+{
+ uint16_t version;
+ struct {
+ uint16_t len;
+ char str[32];
+ } binder_version;
+ struct {
+ uint16_t len;
+ char str[32];
+ } binder_options;
+ struct {
+ uint16_t len;
+ char str[32];
+ } system_name;
+ struct {
+ uint16_t len;
+ char str[65];
+ } user_name;
+ uint32_t date_bound;
+ struct {
+ uint32_t code_address;
+ uint32_t static_address;
+ } main_entry_link;
+ uint32_t user_boundary;
+ uint16_t n_modules;
+ uint16_t n_external_vars;
+ uint16_t n_link_names;
+ uint16_t n_unsnapped_links;
+ uint16_t n_vm_pages;
+ uint16_t n_header_pages;
+ struct {
+ uint32_t address;
+ uint32_t len;
+ } info[3][4];
+ uint32_t module_map_address;
+ uint32_t module_map_len;
+ uint32_t external_vars_map_address;
+ uint32_t external_vars_map_len;
+ uint32_t link_names_map_address;
+ uint32_t link_names_map_len;
+ uint32_t link_map_address;
+ uint32_t link_map_len;
+ uint32_t header_address;
+ uint32_t header_len;
+ uint8_t access_info[2048];
+ uint32_t flags;
+ uint16_t n_tasks;
+ uint32_t task_static_len[3];
+ uint32_t stack_len;
+ uint16_t n_entries;
+ uint32_t entry_map_address;
+ uint32_t entry_map_len;
+ uint16_t pop_version;
+ uint16_t processor;
+ struct {
+ uint16_t len;
+ char str[32];
+ } release_name;
+ struct {
+ uint32_t map_address;
+ uint32_t map_len;
+ uint32_t n_relocations;
+ } relocation_info;
+ uint32_t high_water_mark;
+ struct {
+ uint16_t len;
+ char str[256];
+ } copyright_notice;
+ uint32_t module_origins[14];
+ uint16_t processor_family;
+ struct {
+ uint16_t len;
+ char str[32];
+ } program_name;
+ uint32_t string_pool_address;
+ uint32_t string_pool_len;
+ uint32_t obj_dir_map_address;
+ uint32_t obj_dir_map_len;
+ uint32_t global_offset_table_address[3];
+ struct {
+ uint32_t address;
+ uint32_t len;
+ } block_map_info[3];
+ uint32_t section_map_file_address;
+ uint32_t section_map_address;
+ uint32_t section_map_len;
+ uint16_t n_sections;
+ uint32_t max_heap_size;
+ uint32_t max_program_size;
+ uint32_t max_stack_size;
+ uint32_t stack_fence_size;
+
+ vos_module_map_entry *module_map_entries;
+ vos_external_var_entry *external_vars;
+} vos_program_module;
+
+int vos_read_header(FILE * f, vos_program_module *out);
+int vos_read_alloc_module_map(FILE * f, vos_program_module *header);
+int vos_read_alloc_external_vars(FILE * f, vos_program_module *header);
+void vos_header_cleanup(vos_program_module *header);
+
+#endif //VOS_PROGRAM_MODULE_H_
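Putting the pieces together, the minimal call sequence for the API declared above is header first, then the optional map loaders, then cleanup. The sketch below is hypothetical (dump_module_count is not part of the patch) and includes <stdio.h> before the header, as the patch's own .c files do, since the prototypes use FILE.

/* Sketch only: minimal use of the vos_program_module reader API.
 * The loaders return 0 on failure; vos_read_header NULLs the map
 * pointers, so vos_header_cleanup is safe even if the maps were
 * never loaded. */
#include <stdio.h>
#include "vos_program_module.h"

static int dump_module_count(const char *path)
{
	vos_program_module header;
	FILE *f = fopen(path, "rb");
	if (!f || !vos_read_header(f, &header)) {
		if (f) {
			fclose(f);
		}
		return -1;
	}
	if (vos_read_alloc_module_map(f, &header)) {
		printf("%d modules\n", header.n_modules);
	}
	vos_header_cleanup(&header);
	fclose(f);
	return 0;
}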
diff --git a/ym2612.c b/ym2612.c
index 3e43b63..2d3404a 100644
--- a/ym2612.c
+++ b/ym2612.c
@@ -521,6 +521,7 @@ void ym_address_write_part1(ym2612_context * context, uint8_t address)
context->selected_part = 0;
context->write_cycle = context->current_cycle;
context->busy_cycles = BUSY_CYCLES_ADDRESS;
+ context->status |= 0x80;
}
void ym_address_write_part2(ym2612_context * context, uint8_t address)
@@ -530,6 +531,7 @@ void ym_address_write_part2(ym2612_context * context, uint8_t address)
context->selected_part = 1;
context->write_cycle = context->current_cycle;
context->busy_cycles = BUSY_CYCLES_ADDRESS;
+ context->status |= 0x80;
}
uint8_t fnum_to_keycode[] = {
diff --git a/z80_to_x86.c b/z80_to_x86.c
index 34263ad..fab31da 100644
--- a/z80_to_x86.c
+++ b/z80_to_x86.c
@@ -14,12 +14,6 @@
#define MODE_UNUSED (MODE_IMMED-1)
-#define ZCYCLES RBP
-#define ZLIMIT RDI
-#define SCRATCH1 R13
-#define SCRATCH2 R14
-#define CONTEXT RSI
-
//#define DO_DEBUG_PRINT
#ifdef DO_DEBUG_PRINT
@@ -28,21 +22,7 @@
#define dprintf
#endif
-extern void z80_read_byte() asm("z80_read_byte");
-extern void z80_read_word() asm("z80_read_word");
-extern void z80_write_byte() asm("z80_write_byte");
-extern void z80_write_word_highfirst() asm("z80_write_word_highfirst");
-extern void z80_write_word_lowfirst() asm("z80_write_word_lowfirst");
-extern void z80_save_context() asm("z80_save_context");
-extern void z80_native_addr() asm("z80_native_addr");
-extern void z80_do_sync() asm("z80_do_sync");
-extern void z80_handle_cycle_limit_int() asm("z80_handle_cycle_limit_int");
-extern void z80_retrans_stub() asm("z80_retrans_stub");
-extern void z80_io_read() asm("z80_io_read");
-extern void z80_io_write() asm("z80_io_write");
-extern void z80_halt() asm("z80_halt");
-extern void z80_save_context() asm("z80_save_context");
-extern void z80_load_context() asm("z80_load_context");
+uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst);
uint8_t z80_size(z80inst * inst)
{
@@ -54,24 +34,9 @@ uint8_t z80_size(z80inst * inst)
return SZ_B;
}
-uint8_t * zcycles(uint8_t * dst, uint32_t num_cycles)
-{
- return add_ir(dst, num_cycles, ZCYCLES, SZ_D);
-}
-
-uint8_t * z80_check_cycles_int(uint8_t * dst, uint16_t address)
-{
- dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D);
- uint8_t * jmp_off = dst+1;
- dst = jcc(dst, CC_NC, dst + 7);
- dst = mov_ir(dst, address, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_handle_cycle_limit_int);
- *jmp_off = dst - (jmp_off+1);
- return dst;
-}
-
-uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts)
+void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts)
{
+ code_info *code = &opts->gen.code;
if (inst->reg == Z80_USE_IMMED) {
ea->mode = MODE_IMMED;
ea->disp = inst->immed;
@@ -81,12 +46,12 @@ uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_
ea->mode = MODE_REG_DIRECT;
if (inst->reg == Z80_IYH) {
if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
- dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W);
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- ea->base = SCRATCH1;
+ mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ ea->base = opts->gen.scratch1;
} else {
ea->base = opts->regs[Z80_IYL];
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
}
} else if(opts->regs[inst->reg] >= 0) {
ea->base = opts->regs[inst->reg];
@@ -96,142 +61,148 @@ uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_
if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
//we can't mix an *H reg with a register that requires the REX prefix
ea->base = opts->regs[z80_low_reg(inst->reg)];
- dst = ror_ir(dst, 8, ea->base, SZ_W);
+ ror_ir(code, 8, ea->base, SZ_W);
}
} else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
//temp regs require REX prefix too
ea->base = opts->regs[z80_low_reg(inst->reg)];
- dst = ror_ir(dst, 8, ea->base, SZ_W);
+ ror_ir(code, 8, ea->base, SZ_W);
}
}
} else {
ea->mode = MODE_REG_DISPLACE8;
- ea->base = CONTEXT;
+ ea->base = opts->gen.context_reg;
ea->disp = offsetof(z80_context, regs) + inst->reg;
}
}
- return dst;
}
-uint8_t * z80_save_reg(uint8_t * dst, z80inst * inst, x86_z80_options * opts)
+void z80_save_reg(z80inst * inst, z80_options * opts)
{
+ code_info *code = &opts->gen.code;
if (inst->reg == Z80_IYH) {
if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) {
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
- dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B);
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
+ mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
} else {
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
}
} else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) {
if ((inst->addr_mode & 0x1F) == Z80_REG) {
uint8_t other_reg = opts->regs[inst->ea_reg];
if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
//we can't mix an *H reg with a register that requires the REX prefix
- dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
+ ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
}
} else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) {
//temp regs require REX prefix too
- dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
+ ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W);
}
}
- return dst;
}
-uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts, uint8_t read, uint8_t modify)
+void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify)
{
+ code_info *code = &opts->gen.code;
uint8_t size, reg, areg;
ea->mode = MODE_REG_DIRECT;
- areg = read ? SCRATCH1 : SCRATCH2;
+ areg = read ? opts->gen.scratch1 : opts->gen.scratch2;
switch(inst->addr_mode & 0x1F)
{
case Z80_REG:
if (inst->ea_reg == Z80_IYH) {
if (inst->reg == Z80_IYL) {
- dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W);
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- ea->base = SCRATCH1;
+ mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W);
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ ea->base = opts->gen.scratch1;
} else {
ea->base = opts->regs[Z80_IYL];
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
}
- } else {
+ } else if(opts->regs[inst->ea_reg] >= 0) {
ea->base = opts->regs[inst->ea_reg];
if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) {
uint8_t other_reg = opts->regs[inst->reg];
+#ifdef X86_64
if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
//we can't mix an *H reg with a register that requires the REX prefix
ea->base = opts->regs[z80_low_reg(inst->ea_reg)];
- dst = ror_ir(dst, 8, ea->base, SZ_W);
+ ror_ir(code, 8, ea->base, SZ_W);
}
+#endif
}
+ } else {
+ ea->mode = MODE_REG_DISPLACE8;
+ ea->base = opts->gen.context_reg;
+ ea->disp = offsetof(z80_context, regs) + inst->ea_reg;
}
break;
case Z80_REG_INDIRECT:
- dst = mov_rr(dst, opts->regs[inst->ea_reg], areg, SZ_W);
+ mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W);
size = z80_size(inst);
if (read) {
if (modify) {
- //dst = push_r(dst, SCRATCH1);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W);
+ //push_r(code, opts->gen.scratch1);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
}
if (size == SZ_B) {
- dst = call(dst, (uint8_t *)z80_read_byte);
+ call(code, opts->read_8);
} else {
- dst = call(dst, (uint8_t *)z80_read_word);
+ call(code, opts->read_16);
}
if (modify) {
- //dst = pop_r(dst, SCRATCH2);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W);
+ //pop_r(code, opts->gen.scratch2);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
}
}
- ea->base = SCRATCH1;
+ ea->base = opts->gen.scratch1;
break;
case Z80_IMMED:
ea->mode = MODE_IMMED;
ea->disp = inst->immed;
break;
case Z80_IMMED_INDIRECT:
- dst = mov_ir(dst, inst->immed, areg, SZ_W);
+ mov_ir(code, inst->immed, areg, SZ_W);
size = z80_size(inst);
if (read) {
/*if (modify) {
- dst = push_r(dst, SCRATCH1);
+ push_r(code, opts->gen.scratch1);
}*/
if (size == SZ_B) {
- dst = call(dst, (uint8_t *)z80_read_byte);
+ call(code, opts->read_8);
} else {
- dst = call(dst, (uint8_t *)z80_read_word);
+ call(code, opts->read_16);
}
if (modify) {
- //dst = pop_r(dst, SCRATCH2);
- dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_W);
+ //pop_r(code, opts->gen.scratch2);
+ mov_ir(code, inst->immed, opts->gen.scratch2, SZ_W);
}
}
- ea->base = SCRATCH1;
+ ea->base = opts->gen.scratch1;
break;
case Z80_IX_DISPLACE:
case Z80_IY_DISPLACE:
reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY];
- dst = mov_rr(dst, reg, areg, SZ_W);
- dst = add_ir(dst, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
+ mov_rr(code, reg, areg, SZ_W);
+ add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W);
size = z80_size(inst);
if (read) {
if (modify) {
- //dst = push_r(dst, SCRATCH1);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W);
+ //push_r(code, opts->gen.scratch1);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
}
if (size == SZ_B) {
- dst = call(dst, (uint8_t *)z80_read_byte);
+ call(code, opts->read_8);
} else {
- dst = call(dst, (uint8_t *)z80_read_word);
+ call(code, opts->read_16);
}
if (modify) {
- //dst = pop_r(dst, SCRATCH2);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W);
+ //pop_r(code, opts->gen.scratch2);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W);
}
}
- ea->base = SCRATCH1;
+ ea->base = opts->gen.scratch1;
break;
case Z80_UNUSED:
ea->mode = MODE_UNUSED;
@@ -240,32 +211,32 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o
fprintf(stderr, "Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F);
exit(1);
}
- return dst;
}
-uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, x86_z80_options * opts)
+void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts)
{
if ((inst->addr_mode & 0x1F) == Z80_REG) {
if (inst->ea_reg == Z80_IYH) {
if (inst->reg == Z80_IYL) {
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
- dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B);
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
+ mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
} else {
- dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W);
+ ror_ir(code, 8, opts->regs[Z80_IY], SZ_W);
}
} else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
uint8_t other_reg = opts->regs[inst->reg];
+#ifdef X86_64
if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) {
//we can't mix an *H reg with a register that requires the REX prefix
- dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W);
+ ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W);
}
+#endif
}
}
- return dst;
}
-uint8_t * z80_save_result(uint8_t * dst, z80inst * inst)
+void z80_save_result(z80_options *opts, z80inst * inst)
{
switch(inst->addr_mode & 0x1f)
{
@@ -274,12 +245,11 @@ uint8_t * z80_save_result(uint8_t * dst, z80inst * inst)
case Z80_IX_DISPLACE:
case Z80_IY_DISPLACE:
if (z80_size(inst) == SZ_B) {
- dst = call(dst, (uint8_t *)z80_write_byte);
+ call(&opts->gen.code, opts->write_8);
} else {
- dst = call(dst, (uint8_t *)z80_write_word_lowfirst);
+ call(&opts->gen.code, opts->write_16_lowfirst);
}
}
- return dst;
}
enum {
@@ -326,14 +296,20 @@ void z80_print_regs_exit(z80_context * context)
exit(0);
}
-uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address)
+void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t interp)
{
- uint32_t cycles;
- x86_ea src_op, dst_op;
+ uint32_t num_cycles;
+ host_ea src_op, dst_op;
uint8_t size;
- x86_z80_options *opts = context->options;
- uint8_t * start = dst;
- dst = z80_check_cycles_int(dst, address);
+ z80_options *opts = context->options;
+ uint8_t * start = opts->gen.code.cur;
+ code_info *code = &opts->gen.code;
+ if (!interp) {
+ check_cycles_int(&opts->gen, address);
+ if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) {
+ zbreakpoint_patch(context, address, start);
+ }
+ }
switch(inst->op)
{
case Z80_LD:
@@ -342,243 +318,253 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
{
case Z80_REG:
case Z80_REG_INDIRECT:
- cycles = size == SZ_B ? 4 : 6;
+ num_cycles = size == SZ_B ? 4 : 6;
if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
- cycles += 4;
+ num_cycles += 4;
}
if (inst->reg == Z80_I || inst->ea_reg == Z80_I) {
- cycles += 5;
+ num_cycles += 5;
}
break;
case Z80_IMMED:
- cycles = size == SZ_B ? 7 : 10;
+ num_cycles = size == SZ_B ? 7 : 10;
break;
case Z80_IMMED_INDIRECT:
- cycles = 10;
+ num_cycles = 10;
break;
case Z80_IX_DISPLACE:
case Z80_IY_DISPLACE:
- cycles = 16;
+ num_cycles = 16;
break;
}
if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode & Z80_DIR) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts);
+ translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts);
} else {
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
+ translate_z80_reg(inst, &dst_op, opts);
}
if (src_op.mode == MODE_REG_DIRECT) {
if(dst_op.mode == MODE_REG_DISPLACE8) {
- dst = mov_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size);
+ mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size);
} else {
- dst = mov_rr(dst, src_op.base, dst_op.base, size);
+ mov_rr(code, src_op.base, dst_op.base, size);
}
} else if(src_op.mode == MODE_IMMED) {
- dst = mov_ir(dst, src_op.disp, dst_op.base, size);
+ mov_ir(code, src_op.disp, dst_op.base, size);
} else {
- dst = mov_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size);
+ mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size);
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ if (inst->ea_reg == Z80_I && inst->addr_mode == Z80_REG) {
+ //ld a, i sets some flags
+ //TODO: Implement half-carry flag
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+		mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch1, SZ_B);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
+ }
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
if (inst->addr_mode & Z80_DIR) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
}
break;
case Z80_PUSH:
- dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
- dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
+ cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5);
+ sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
if (inst->reg == Z80_AF) {
- dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B);
- dst = shl_ir(dst, 8, SCRATCH1, SZ_W);
- dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 1, SCRATCH1, SZ_B);
- dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 2, SCRATCH1, SZ_B);
- dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 2, SCRATCH1, SZ_B);
- dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 1, SCRATCH1, SZ_B);
- dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH1, SZ_B);
- dst = shl_ir(dst, 1, SCRATCH1, SZ_B);
- dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH1, SZ_B);
+ mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B);
+ shl_ir(code, 8, opts->gen.scratch1, SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 1, opts->gen.scratch1, SZ_B);
+ or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 2, opts->gen.scratch1, SZ_B);
+ or_rdispr(code, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 2, opts->gen.scratch1, SZ_B);
+ or_rdispr(code, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 1, opts->gen.scratch1, SZ_B);
+ or_rdispr(code, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B);
+ shl_ir(code, 1, opts->gen.scratch1, SZ_B);
+ or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B);
} else {
- dst = translate_z80_reg(inst, &src_op, dst, opts);
- dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W);
+ translate_z80_reg(inst, &src_op, opts);
+ mov_rr(code, src_op.base, opts->gen.scratch1, SZ_W);
}
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_word_highfirst);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_16_highfirst);
//no call to save_z80_reg needed since there's no chance we'll use only
//the upper half of a register pair
break;
case Z80_POP:
- dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_word);
- dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
+ cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_16);
+ add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
if (inst->reg == Z80_AF) {
- dst = bt_ir(dst, 0, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = bt_ir(dst, 1, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_N));
- dst = bt_ir(dst, 2, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_PV));
- dst = bt_ir(dst, 4, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_H));
- dst = bt_ir(dst, 6, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_Z));
- dst = bt_ir(dst, 7, SCRATCH1, SZ_W);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_S));
- dst = shr_ir(dst, 8, SCRATCH1, SZ_W);
- dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B);
+ bt_ir(code, 0, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ bt_ir(code, 1, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_N));
+ bt_ir(code, 2, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_PV));
+ bt_ir(code, 4, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_H));
+ bt_ir(code, 6, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z));
+ bt_ir(code, 7, opts->gen.scratch1, SZ_W);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S));
+ shr_ir(code, 8, opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
} else {
- dst = translate_z80_reg(inst, &src_op, dst, opts);
- dst = mov_rr(dst, SCRATCH1, src_op.base, SZ_W);
+ translate_z80_reg(inst, &src_op, opts);
+ mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W);
}
//no call to save_z80_reg needed since there's no chance we'll use only
//the upper half of a register pair
break;
case Z80_EX:
if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) {
- cycles = 4;
+ num_cycles = 4;
} else {
- cycles = 8;
+ num_cycles = 8;
}
- dst = zcycles(dst, cycles);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode == Z80_REG) {
if(inst->reg == Z80_AF) {
- dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B);
- dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_A), opts->regs[Z80_A], SZ_B);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_A), SZ_B);
+ mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B);
+ mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B);
//Flags are currently word aligned, so we can move
//them efficiently a word at a time
for (int f = ZF_C; f < ZF_NUM; f+=2) {
- dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W);
- dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W);
+ mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W);
}
} else {
- dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
+ xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W);
}
} else {
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = xchg_rr(dst, opts->regs[inst->reg], SCRATCH1, SZ_B);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
- dst = zcycles(dst, 1);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
+ cycles(&opts->gen, 1);
uint8_t high_reg = z80_high_reg(inst->reg);
uint8_t use_reg;
//even though some of the upper halves can be used directly
//the limitations on mixing *H regs with the REX prefix
//prevent us from taking advantage of it
use_reg = opts->regs[inst->reg];
- dst = ror_ir(dst, 8, use_reg, SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = add_ir(dst, 1, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = xchg_rr(dst, use_reg, SCRATCH1, SZ_B);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = add_ir(dst, 1, SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
+ ror_ir(code, 8, use_reg, SZ_W);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ add_ir(code, 1, opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ add_ir(code, 1, opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
//restore reg to normal rotation
- dst = ror_ir(dst, 8, use_reg, SZ_W);
- dst = zcycles(dst, 2);
+ ror_ir(code, 8, use_reg, SZ_W);
+ cycles(&opts->gen, 2);
}
break;
case Z80_EXX:
- dst = zcycles(dst, 4);
- dst = mov_rr(dst, opts->regs[Z80_BC], SCRATCH1, SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W);
- dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W);
- dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_C), SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zar_off(Z80_L), SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH1, SZ_W);
- dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_E), SZ_W);
+ cycles(&opts->gen, 4);
+ mov_rr(code, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W);
+ mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W);
+ mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W);
+ mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W);
+ mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W);
break;
case Z80_LDI: {
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
- dst = zcycles(dst, 2);
- dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W);
- dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W);
- dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
+ cycles(&opts->gen, 2);
+ add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
+ add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
+ sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
//TODO: Implement half-carry
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
break;
}
case Z80_LDIR: {
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
- dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W);
- dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W);
-
- dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W);
- uint8_t * cont = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = zcycles(dst, 7);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
+ add_ir(code, 1, opts->regs[Z80_DE], SZ_W);
+ add_ir(code, 1, opts->regs[Z80_HL], SZ_W);
+
+ sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
+ uint8_t * cont = code->cur+1;
+ jcc(code, CC_Z, code->cur+2);
+ cycles(&opts->gen, 7);
//TODO: Figure out what the flag state should be here
//TODO: Figure out whether an interrupt can interrupt this
- dst = jmp(dst, start);
- *cont = dst - (cont + 1);
- dst = zcycles(dst, 2);
+ jmp(code, start);
+ *cont = code->cur - (cont + 1);
+ cycles(&opts->gen, 2);
//TODO: Implement half-carry
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
break;
}
case Z80_LDD: {
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
- dst = zcycles(dst, 2);
- dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W);
- dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W);
- dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
+ cycles(&opts->gen, 2);
+ sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
+ sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
+ sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
//TODO: Implement half-carry
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV));
break;
}
case Z80_LDDR: {
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
- dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
- dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W);
- dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W);
-
- dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W);
- uint8_t * cont = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = zcycles(dst, 7);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_8);
+ sub_ir(code, 1, opts->regs[Z80_DE], SZ_W);
+ sub_ir(code, 1, opts->regs[Z80_HL], SZ_W);
+
+ sub_ir(code, 1, opts->regs[Z80_BC], SZ_W);
+ uint8_t * cont = code->cur+1;
+ jcc(code, CC_Z, code->cur+2);
+ cycles(&opts->gen, 7);
//TODO: Figure out what the flag state should be here
//TODO: Figure out whether an interrupt can interrupt this
- dst = jmp(dst, start);
- *cont = dst - (cont + 1);
- dst = zcycles(dst, 2);
+ jmp(code, start);
+ *cont = code->cur - (cont + 1);
+ cycles(&opts->gen, 2);
//TODO: Implement half-carry
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
break;
}
/*case Z80_CPI:
@@ -587,622 +573,634 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
case Z80_CPDR:
break;*/
case Z80_ADD:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = add_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ add_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ add_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
if (z80_size(inst) == SZ_B) {
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_ADC:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
- dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
+ bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ adc_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ adc_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_SUB:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = sub_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ sub_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ sub_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_SBC:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
- dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
+ bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ sbb_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_AND:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = and_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ and_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ and_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
//TODO: Cleanup flags
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
if (z80_size(inst) == SZ_B) {
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_OR:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = or_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ or_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ or_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
//TODO: Cleanup flags
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
if (z80_size(inst) == SZ_B) {
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_XOR:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
} else if(z80_size(inst) == SZ_W) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = xor_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ xor_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ xor_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
//TODO: Cleanup flags
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
if (z80_size(inst) == SZ_B) {
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_CP:
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 12;
+ num_cycles += 12;
} else if(inst->addr_mode == Z80_IMMED) {
- cycles += 3;
+ num_cycles += 3;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
if (src_op.mode == MODE_REG_DIRECT) {
- dst = cmp_rr(dst, src_op.base, dst_op.base, z80_size(inst));
+ cmp_rr(code, src_op.base, dst_op.base, z80_size(inst));
} else {
- dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst));
+ cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst));
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
break;
case Z80_INC:
- cycles = 4;
+ num_cycles = 4;
if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
- cycles += 6;
+ num_cycles += 6;
} else if(z80_size(inst) == SZ_W) {
- cycles += 2;
+ num_cycles += 2;
} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
if (dst_op.mode == MODE_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
}
- dst = add_ir(dst, 1, dst_op.base, z80_size(inst));
+ add_ir(code, 1, dst_op.base, z80_size(inst));
if (z80_size(inst) == SZ_B) {
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
- dst = z80_save_result(dst, inst);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
+ z80_save_result(opts, inst);
break;
case Z80_DEC:
- cycles = 4;
+ num_cycles = 4;
if (inst->reg == Z80_IX || inst->reg == Z80_IY) {
- cycles += 6;
+ num_cycles += 6;
} else if(z80_size(inst) == SZ_W) {
- cycles += 2;
+ num_cycles += 2;
} else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ cycles(&opts->gen, num_cycles);
+ translate_z80_reg(inst, &dst_op, opts);
if (dst_op.mode == MODE_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
}
- dst = sub_ir(dst, 1, dst_op.base, z80_size(inst));
+ sub_ir(code, 1, dst_op.base, z80_size(inst));
if (z80_size(inst) == SZ_B) {
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
}
- dst = z80_save_reg(dst, inst, opts);
- dst = z80_save_ea(dst, inst, opts);
- dst = z80_save_result(dst, inst);
+ z80_save_reg(inst, opts);
+ z80_save_ea(code, inst, opts);
+ z80_save_result(opts, inst);
break;
//case Z80_DAA:
case Z80_CPL:
- dst = zcycles(dst, 4);
- dst = not_r(dst, opts->regs[Z80_A], SZ_B);
+ cycles(&opts->gen, 4);
+ not_r(code, opts->regs[Z80_A], SZ_B);
//TODO: Implement half-carry flag
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
break;
case Z80_NEG:
- dst = zcycles(dst, 8);
- dst = neg_r(dst, opts->regs[Z80_A], SZ_B);
+ cycles(&opts->gen, 8);
+ neg_r(code, opts->regs[Z80_A], SZ_B);
//TODO: Implement half-carry flag
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV));
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV));
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
break;
case Z80_CCF:
- dst = zcycles(dst, 4);
- dst = xor_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B);
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ cycles(&opts->gen, 4);
+ xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
break;
case Z80_SCF:
- dst = zcycles(dst, 4);
- dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B);
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ cycles(&opts->gen, 4);
+ mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
break;
case Z80_NOP:
if (inst->immed == 42) {
- dst = call(dst, (uint8_t *)z80_save_context);
- dst = mov_rr(dst, CONTEXT, RDI, SZ_Q);
- dst = jmp(dst, (uint8_t *)z80_print_regs_exit);
+ call(code, opts->gen.save_context);
+ call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg);
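+ //presumably a test-harness hook: the decoder appears to fold consecutive NOPs into immed, and a run of 42 saves the context and dumps the Z80 registers via z80_print_regs_exit instead of executing normally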
} else {
- dst = zcycles(dst, 4 * inst->immed);
+ cycles(&opts->gen, 4 * inst->immed);
}
break;
- case Z80_HALT:
- dst = zcycles(dst, 4);
- dst = mov_ir(dst, address, SCRATCH1, SZ_W);
- uint8_t * call_inst = dst;
- dst = call(dst, (uint8_t *)z80_halt);
- dst = jmp(dst, call_inst);
+ case Z80_HALT: {
+ code_ptr loop_top = code->cur;
+ //this isn't terribly efficient, but it's good enough for now
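+ //each pass through the loop below burns 4 T-states; check_cycles_int should hand control to the interrupt/sync handler once the target cycle is reached, which is how the halt ends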
+ cycles(&opts->gen, 4);
+ check_cycles_int(&opts->gen, address);
+ jmp(code, loop_top);
break;
+ }
case Z80_DI:
- dst = zcycles(dst, 4);
- dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff1), SZ_B);
- dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff2), SZ_B);
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, sync_cycle), ZLIMIT, SZ_D);
- dst = mov_irdisp8(dst, 0xFFFFFFFF, CONTEXT, offsetof(z80_context, int_cycle), SZ_D);
+ cycles(&opts->gen, 4);
+ mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D);
+ mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D);
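+ //with interrupts disabled the next interrupt is pushed out to "never" (0xFFFFFFFF) and the cycle limit falls back to the plain sync point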
break;
case Z80_EI:
- dst = zcycles(dst, 4);
- dst = mov_rrdisp32(dst, ZCYCLES, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D);
- dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff1), SZ_B);
- dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff2), SZ_B);
+ cycles(&opts->gen, 4);
+ mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
+ mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+ mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
//interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles
- dst = add_irdisp32(dst, 4, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D);
- dst = call(dst, (uint8_t *)z80_do_sync);
+ add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D);
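+ //the 4 is scaled by clock_divider because, in the refactored core, cycle counts appear to be kept in master-clock units rather than Z80 T-states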
+ call(code, opts->do_sync);
break;
case Z80_IM:
- dst = zcycles(dst, 4);
- dst = mov_irdisp8(dst, inst->immed, CONTEXT, offsetof(z80_context, im), SZ_B);
+ cycles(&opts->gen, 4);
+ mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B);
break;
case Z80_RLC:
- cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
- dst = zcycles(dst, cycles);
+ num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = rol_ir(dst, 1, dst_op.base, SZ_B);
+ rol_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ if (inst->immed) {
+ //rlca does not set these flags
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ }
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_RL:
- cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
- dst = zcycles(dst, cycles);
+ num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
- dst = rcl_ir(dst, 1, dst_op.base, SZ_B);
+ bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
+ rcl_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ if (inst->immed) {
+ //rla does not set these flags
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ }
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_RRC:
- cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
- dst = zcycles(dst, cycles);
+ num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = ror_ir(dst, 1, dst_op.base, SZ_B);
+ ror_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ if (inst->immed) {
+ //rrca does not set these flags
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ }
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_RR:
- cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
- dst = zcycles(dst, cycles);
+ num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8);
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
- dst = rcr_ir(dst, 1, dst_op.base, SZ_B);
+ bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
+ rcr_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ if (inst->immed) {
+ //rra does not set these flags
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+ }
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_SLA:
case Z80_SLL:
- cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
- dst = zcycles(dst, cycles);
+ num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = shl_ir(dst, 1, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
+ shl_ir(code, 1, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
if (inst->op == Z80_SLL) {
- dst = or_ir(dst, 1, dst_op.base, SZ_B);
+ or_ir(code, 1, dst_op.base, SZ_B);
}
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_SRA:
- cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
- dst = zcycles(dst, cycles);
+ num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = sar_ir(dst, 1, dst_op.base, SZ_B);
+ sar_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_SRL:
- cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
- dst = zcycles(dst, cycles);
+ num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8;
+ cycles(&opts->gen, num_cycles);
if (inst->addr_mode != Z80_UNUSED) {
- dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY);
- dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register
- dst = zcycles(dst, 1);
+ translate_z80_ea(inst, &dst_op, opts, READ, MODIFY);
+ translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register
+ cycles(&opts->gen, 1);
} else {
src_op.mode = MODE_UNUSED;
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
- dst = shr_ir(dst, 1, dst_op.base, SZ_B);
+ shr_ir(code, 1, dst_op.base, SZ_B);
if (src_op.mode != MODE_UNUSED) {
- dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B);
+ mov_rr(code, dst_op.base, src_op.base, SZ_B);
}
- dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
//TODO: Implement half-carry flag
- dst = cmp_ir(dst, 0, dst_op.base, SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ cmp_ir(code, 0, dst_op.base, SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
if (inst->addr_mode != Z80_UNUSED) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (src_op.mode != MODE_UNUSED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
} else {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
break;
case Z80_RLD:
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
//Before: (HL) = 0x12, A = 0x34
//After: (HL) = 0x24, A = 0x31
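+ //i.e. RLD rotates the 12-bit value formed by A[3:0] and (HL) left by one nibble, as the example above illustrates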
- dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B);
- dst = shl_ir(dst, 4, SCRATCH1, SZ_W);
- dst = and_ir(dst, 0xF, SCRATCH2, SZ_W);
- dst = and_ir(dst, 0xFFF, SCRATCH1, SZ_W);
- dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B);
- dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W);
- //SCRATCH1 = 0x0124
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- dst = zcycles(dst, 4);
- dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B);
+ mov_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B);
+ shl_ir(code, 4, opts->gen.scratch1, SZ_W);
+ and_ir(code, 0xF, opts->gen.scratch2, SZ_W);
+ and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W);
+ and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
+ or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
+ //opts->gen.scratch1 = 0x0124
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ cycles(&opts->gen, 4);
+ or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
//set flags
//TODO: Implement half-carry flag
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
-
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W);
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W);
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ call(code, opts->write_8);
break;
case Z80_RRD:
- dst = zcycles(dst, 8);
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_byte);
+ cycles(&opts->gen, 8);
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
//Before: (HL) = 0x12, A = 0x34
//After: (HL) = 0x41, A = 0x32
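+ //i.e. RRD rotates the same 12-bit value right by one nibble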
- dst = movzx_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B, SZ_W);
- dst = ror_ir(dst, 4, SCRATCH1, SZ_W);
- dst = shl_ir(dst, 4, SCRATCH2, SZ_W);
- dst = and_ir(dst, 0xF00F, SCRATCH1, SZ_W);
- dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B);
- //SCRATCH1 = 0x2001
- //SCRATCH2 = 0x0040
- dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W);
- //SCRATCH1 = 0x2041
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- dst = zcycles(dst, 4);
- dst = shr_ir(dst, 4, SCRATCH1, SZ_B);
- dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B);
+ movzx_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W);
+ ror_ir(code, 4, opts->gen.scratch1, SZ_W);
+ shl_ir(code, 4, opts->gen.scratch2, SZ_W);
+ and_ir(code, 0xF00F, opts->gen.scratch1, SZ_W);
+ and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B);
+ //opts->gen.scratch1 = 0x2001
+ //opts->gen.scratch2 = 0x0040
+ or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W);
+ //opts->gen.scratch1 = 0x2041
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ cycles(&opts->gen, 4);
+ shr_ir(code, 4, opts->gen.scratch1, SZ_B);
+ or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B);
//set flags
//TODO: Implement half-carry flag
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
- dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV));
- dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
-
- dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W);
- dst = ror_ir(dst, 8, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_byte);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
+ setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV));
+ setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
+
+ mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W);
+ ror_ir(code, 8, opts->gen.scratch1, SZ_W);
+ call(code, opts->write_8);
break;
case Z80_BIT: {
- cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
- dst = zcycles(dst, cycles);
+ num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+ cycles(&opts->gen, num_cycles);
uint8_t bit;
if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
@@ -1211,27 +1209,27 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
} else {
size = SZ_B;
bit = inst->immed;
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY);
+ translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY);
}
if (inst->addr_mode != Z80_REG) {
//Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4
- dst = zcycles(dst, 1);
+ cycles(&opts->gen, 1);
}
- dst = bt_ir(dst, bit, src_op.base, size);
- dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_Z));
- dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_PV));
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);
+ bt_ir(code, bit, src_op.base, size);
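+ //BIT stores the complement of the tested bit in Z (and mirrors it into PV), hence CC_NC rather than CC_C below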
+ setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z));
+ setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV));
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);
if (inst->immed == 7) {
- dst = cmp_ir(dst, 0, src_op.base, size);
- dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S));
+ cmp_ir(code, 0, src_op.base, size);
+ setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S));
} else {
- dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B);
+ mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
}
break;
}
case Z80_SET: {
- cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
- dst = zcycles(dst, cycles);
+ num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+ cycles(&opts->gen, num_cycles);
uint8_t bit;
if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
@@ -1240,40 +1238,44 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
} else {
size = SZ_B;
bit = inst->immed;
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY);
+ translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
}
if (inst->reg != Z80_USE_IMMED) {
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
if (inst->addr_mode != Z80_REG) {
//Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4
- dst = zcycles(dst, 1);
+ cycles(&opts->gen, 1);
}
- dst = bts_ir(dst, bit, src_op.base, size);
+ bts_ir(code, bit, src_op.base, size);
if (inst->reg != Z80_USE_IMMED) {
if (size == SZ_W) {
+#ifdef X86_64
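+ //x86-64 can't encode the high-byte registers (AH..BH) in an instruction that also references R8-R15 (REX prefix restriction), so when dst_op lives in R8+ the word register is rotated to reach its low byte instead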
if (dst_op.base >= R8) {
- dst = ror_ir(dst, 8, src_op.base, SZ_W);
- dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
- dst = ror_ir(dst, 8, src_op.base, SZ_W);
+ ror_ir(code, 8, src_op.base, SZ_W);
+ mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
+ ror_ir(code, 8, src_op.base, SZ_W);
} else {
- dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B);
+#endif
+ mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B);
+#ifdef X86_64
}
+#endif
} else {
- dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B);
+ mov_rr(code, src_op.base, dst_op.base, SZ_B);
}
}
if ((inst->addr_mode & 0x1F) != Z80_REG) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (inst->reg != Z80_USE_IMMED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
}
break;
}
case Z80_RES: {
- cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
- dst = zcycles(dst, cycles);
+ num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16;
+ cycles(&opts->gen, num_cycles);
uint8_t bit;
if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) {
src_op.base = opts->regs[z80_word_reg(inst->ea_reg)];
@@ -1282,361 +1284,331 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
} else {
size = SZ_B;
bit = inst->immed;
- dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY);
+ translate_z80_ea(inst, &src_op, opts, READ, MODIFY);
}
if (inst->reg != Z80_USE_IMMED) {
- dst = translate_z80_reg(inst, &dst_op, dst, opts);
+ translate_z80_reg(inst, &dst_op, opts);
}
if (inst->addr_mode != Z80_REG) {
//Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4
- dst = zcycles(dst, 1);
+ cycles(&opts->gen, 1);
}
- dst = btr_ir(dst, bit, src_op.base, size);
+ btr_ir(code, bit, src_op.base, size);
if (inst->reg != Z80_USE_IMMED) {
if (size == SZ_W) {
+#ifdef X86_64
if (dst_op.base >= R8) {
- dst = ror_ir(dst, 8, src_op.base, SZ_W);
- dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
- dst = ror_ir(dst, 8, src_op.base, SZ_W);
+ ror_ir(code, 8, src_op.base, SZ_W);
+ mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B);
+ ror_ir(code, 8, src_op.base, SZ_W);
} else {
- dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B);
+#endif
+ mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B);
+#ifdef X86_64
}
+#endif
} else {
- dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B);
+ mov_rr(code, src_op.base, dst_op.base, SZ_B);
}
}
if (inst->addr_mode != Z80_REG) {
- dst = z80_save_result(dst, inst);
+ z80_save_result(opts, inst);
if (inst->reg != Z80_USE_IMMED) {
- dst = z80_save_reg(dst, inst, opts);
+ z80_save_reg(inst, opts);
}
}
break;
}
case Z80_JP: {
- cycles = 4;
+ num_cycles = 4;
if (inst->addr_mode != Z80_REG_INDIRECT) {
- cycles += 6;
+ num_cycles += 6;
} else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) {
- cycles += 4;
+ num_cycles += 4;
}
- dst = zcycles(dst, cycles);
- if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, inst->immed);
+ cycles(&opts->gen, num_cycles);
+ if (inst->addr_mode != Z80_REG_INDIRECT) {
+ code_ptr call_dst = z80_get_native_address(context, inst->immed);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
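+ //defer_address records the displacement location (code->cur + 1) so the jump can be patched once the target address gets translated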
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
+ jmp(code, call_dst);
} else {
if (inst->addr_mode == Z80_REG_INDIRECT) {
- dst = mov_rr(dst, opts->regs[inst->ea_reg], SCRATCH1, SZ_W);
+ mov_rr(code, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W);
} else {
- dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W);
+ mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W);
}
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
}
break;
}
case Z80_JPCC: {
- dst = zcycles(dst, 7);//T States: 4,3
+ cycles(&opts->gen, 7);//T States: 4,3
uint8_t cond = CC_Z;
switch (inst->reg)
{
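+ //deliberate fall-through: each "inverted" condition just flips cond to CC_NZ and then shares the flag compare with its counterpart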
case Z80_CC_NZ:
cond = CC_NZ;
case Z80_CC_Z:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
break;
case Z80_CC_NC:
cond = CC_NZ;
case Z80_CC_C:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
break;
case Z80_CC_PO:
cond = CC_NZ;
case Z80_CC_PE:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
break;
case Z80_CC_P:
cond = CC_NZ;
case Z80_CC_M:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
break;
}
- uint8_t *no_jump_off = dst+1;
- dst = jcc(dst, cond, dst+2);
- dst = zcycles(dst, 5);//T States: 5
+ uint8_t *no_jump_off = code->cur+1;
+ jcc(code, cond, code->cur+2);
+ cycles(&opts->gen, 5);//T States: 5
uint16_t dest_addr = inst->immed;
- if (dest_addr < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, dest_addr);
+ code_ptr call_dst = z80_get_native_address(context, dest_addr);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- *no_jump_off = dst - (no_jump_off+1);
+ jmp(code, call_dst);
+ *no_jump_off = code->cur - (no_jump_off+1);
break;
}
case Z80_JR: {
- dst = zcycles(dst, 12);//T States: 4,3,5
+ cycles(&opts->gen, 12);//T States: 4,3,5
uint16_t dest_addr = address + inst->immed + 2;
- if (dest_addr < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, dest_addr);
+ code_ptr call_dst = z80_get_native_address(context, dest_addr);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
+ jmp(code, call_dst);
break;
}
case Z80_JRCC: {
- dst = zcycles(dst, 7);//T States: 4,3
+ cycles(&opts->gen, 7);//T States: 4,3
uint8_t cond = CC_Z;
switch (inst->reg)
{
case Z80_CC_NZ:
cond = CC_NZ;
case Z80_CC_Z:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
break;
case Z80_CC_NC:
cond = CC_NZ;
case Z80_CC_C:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
break;
}
- uint8_t *no_jump_off = dst+1;
- dst = jcc(dst, cond, dst+2);
- dst = zcycles(dst, 5);//T States: 5
+ uint8_t *no_jump_off = code->cur+1;
+ jcc(code, cond, code->cur+2);
+ cycles(&opts->gen, 5);//T States: 5
uint16_t dest_addr = address + inst->immed + 2;
- if (dest_addr < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, dest_addr);
+ code_ptr call_dst = z80_get_native_address(context, dest_addr);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- *no_jump_off = dst - (no_jump_off+1);
+ jmp(code, call_dst);
+ *no_jump_off = code->cur - (no_jump_off+1);
break;
}
- case Z80_DJNZ:
- dst = zcycles(dst, 8);//T States: 5,3
- dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B);
- uint8_t *no_jump_off = dst+1;
- dst = jcc(dst, CC_Z, dst+2);
- dst = zcycles(dst, 5);//T States: 5
+ case Z80_DJNZ: {
+ cycles(&opts->gen, 8);//T States: 5,3
+ sub_ir(code, 1, opts->regs[Z80_B], SZ_B);
+ uint8_t *no_jump_off = code->cur+1;
+ jcc(code, CC_Z, code->cur+2);
+ cycles(&opts->gen, 5);//T States: 5
uint16_t dest_addr = address + inst->immed + 2;
- if (dest_addr < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, dest_addr);
+ code_ptr call_dst = z80_get_native_address(context, dest_addr);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- *no_jump_off = dst - (no_jump_off+1);
+ jmp(code, call_dst);
+ *no_jump_off = code->cur - (no_jump_off+1);
break;
+ }
case Z80_CALL: {
- dst = zcycles(dst, 11);//T States: 4,3,4
- dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3
- if (inst->immed < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, inst->immed);
+ cycles(&opts->gen, 11);//T States: 4,3,4
+ sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_16_highfirst);//T States: 3, 3
+ code_ptr call_dst = z80_get_native_address(context, inst->immed);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
+ jmp(code, call_dst);
break;
}
- case Z80_CALLCC:
- dst = zcycles(dst, 10);//T States: 4,3,3 (false case)
+ case Z80_CALLCC: {
+ cycles(&opts->gen, 10);//T States: 4,3,3 (false case)
uint8_t cond = CC_Z;
switch (inst->reg)
{
case Z80_CC_NZ:
cond = CC_NZ;
case Z80_CC_Z:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
break;
case Z80_CC_NC:
cond = CC_NZ;
case Z80_CC_C:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
break;
case Z80_CC_PO:
cond = CC_NZ;
case Z80_CC_PE:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
break;
case Z80_CC_P:
cond = CC_NZ;
case Z80_CC_M:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
break;
}
- uint8_t *no_call_off = dst+1;
- dst = jcc(dst, cond, dst+2);
- dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case
- dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3
- if (inst->immed < 0x4000) {
- uint8_t * call_dst = z80_get_native_address(context, inst->immed);
+ uint8_t *no_call_off = code->cur+1;
+ jcc(code, cond, code->cur+2);
+ cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case
+ sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ mov_ir(code, address + 3, opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_16_highfirst);//T States: 3, 3
+ code_ptr call_dst = z80_get_native_address(context, inst->immed);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
- } else {
- dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- }
- *no_call_off = dst - (no_call_off+1);
+ jmp(code, call_dst);
+ *no_call_off = code->cur - (no_call_off+1);
break;
+ }
case Z80_RET:
- dst = zcycles(dst, 4);//T States: 4
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3
- dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
+ cycles(&opts->gen, 4);//T States: 4
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_16);//T States: 3, 3
+ add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
break;
case Z80_RETCC: {
- dst = zcycles(dst, 5);//T States: 5
+ cycles(&opts->gen, 5);//T States: 5
uint8_t cond = CC_Z;
switch (inst->reg)
{
case Z80_CC_NZ:
cond = CC_NZ;
case Z80_CC_Z:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B);
break;
case Z80_CC_NC:
cond = CC_NZ;
case Z80_CC_C:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B);
break;
case Z80_CC_PO:
cond = CC_NZ;
case Z80_CC_PE:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B);
break;
case Z80_CC_P:
cond = CC_NZ;
case Z80_CC_M:
- dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B);
+ cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B);
break;
}
- uint8_t *no_call_off = dst+1;
- dst = jcc(dst, cond, dst+2);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3
- dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
- *no_call_off = dst - (no_call_off+1);
+ uint8_t *no_call_off = code->cur+1;
+ jcc(code, cond, code->cur+2);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_16);//T States: 3, 3
+ add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ *no_call_off = code->cur - (no_call_off+1);
break;
}
case Z80_RETI:
//For some systems, this may need a callback for signalling interrupt routine completion
- dst = zcycles(dst, 8);//T States: 4, 4
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3
- dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
+ cycles(&opts->gen, 8);//T States: 4, 4
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ call(code, opts->read_16);//T States: 3, 3
+ add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
break;
case Z80_RETN:
- dst = zcycles(dst, 8);//T States: 4, 4
- dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, iff2), SCRATCH2, SZ_B);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W);
- dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(z80_context, iff1), SZ_B);
- dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3
- dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = call(dst, (uint8_t *)z80_native_addr);
- dst = jmp_r(dst, SCRATCH1);
+ cycles(&opts->gen, 8);//T States: 4, 4
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W);
+ mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+ call(code, opts->read_16);//T States: 3, 3
+ add_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
break;
case Z80_RST: {
//RST is basically CALL to an address in page 0
- dst = zcycles(dst, 5);//T States: 5
- dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W);
- dst = mov_ir(dst, address + 1, SCRATCH1, SZ_W);
- dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W);
- dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3
- uint8_t * call_dst = z80_get_native_address(context, inst->immed);
+ cycles(&opts->gen, 5);//T States: 5
+ sub_ir(code, 2, opts->regs[Z80_SP], SZ_W);
+ mov_ir(code, address + 1, opts->gen.scratch1, SZ_W);
+ mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W);
+ call(code, opts->write_16_highfirst);//T States: 3, 3
+ code_ptr call_dst = z80_get_native_address(context, inst->immed);
if (!call_dst) {
- opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1);
+ opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1);
//fake address to force large displacement
- call_dst = dst + 256;
+ call_dst = code->cur + 256;
}
- dst = jmp(dst, call_dst);
+ jmp(code, call_dst);
break;
}
case Z80_IN:
- dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
+ cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
if (inst->addr_mode == Z80_IMMED_INDIRECT) {
- dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_B);
+ mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B);
} else {
- dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH1, SZ_B);
+ mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B);
}
- dst = call(dst, (uint8_t *)z80_io_read);
- translate_z80_reg(inst, &dst_op, dst, opts);
- dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B);
- dst = z80_save_reg(dst, inst, opts);
+ call(code, opts->read_io);
+ translate_z80_reg(inst, &dst_op, opts);
+ mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B);
+ z80_save_reg(inst, opts);
break;
/*case Z80_INI:
case Z80_INIR:
case Z80_IND:
case Z80_INDR:*/
case Z80_OUT:
- dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
+ cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4
if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) {
- dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_B);
+ mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B);
} else {
- dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH2, SZ_B);
+ mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B);
}
- translate_z80_reg(inst, &src_op, dst, opts);
- dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B);
- dst = call(dst, (uint8_t *)z80_io_write);
- dst = z80_save_reg(dst, inst, opts);
+ translate_z80_reg(inst, &src_op, opts);
+ mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B);
+ call(code, opts->write_io);
+ z80_save_reg(inst, opts);
break;
/*case Z80_OUTI:
case Z80_OTIR:
@@ -1652,21 +1624,74 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context
exit(1);
}
}
- return dst;
}
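
The conditional CALL/RET cases above all rely on the same backpatching idiom: reserve the one-byte displacement of a short jcc (the no_call_off pointer), emit the code that may be skipped, then write the distance back once the skip target is known. Below is a minimal standalone sketch of that idiom over a plain byte buffer; the 0x74 (jz rel8) encoding and buffer are only illustrative stand-ins for what gen_x86.c's jcc and code_info actually emit.

#include <stdint.h>
#include <stddef.h>

static uint8_t buf[16];

size_t emit_skippable_block(void)
{
	uint8_t *cur = buf;
	*cur++ = 0x74;                 /* jz rel8; displacement not known yet     */
	uint8_t *no_call_off = cur++;  /* remember where the rel8 byte lives      */
	*cur++ = 0x90;                 /* body that the jump may skip (NOPs here) */
	*cur++ = 0x90;
	*no_call_off = (uint8_t)(cur - (no_call_off + 1)); /* patch: bytes past the jcc */
	return (size_t)(cur - buf);
}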
+uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context)
+{
+ if (!context->interp_code[opcode]) {
+ if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) {
+ fprintf(stderr, "Encountered prefix byte %X at address %X. Z80 interpreter doesn't support those yet.\n", opcode, context->pc);
+ exit(1);
+ }
+ uint8_t codebuf[8];
+ memset(codebuf, 0, sizeof(codebuf));
+ codebuf[0] = opcode;
+ z80inst inst;
+ uint8_t * after = z80_decode(codebuf, &inst);
+ if (after - codebuf > 1) {
+ fprintf(stderr, "Encountered multi-byte Z80 instruction at %X. Z80 interpreter doesn't support those yet.\n", context->pc);
+ exit(1);
+ }
+
+ z80_options * opts = context->options;
+ code_info *code = &opts->gen.code;
+ check_alloc_code(code, ZMAX_NATIVE_SIZE);
+ context->interp_code[opcode] = code->cur;
+ translate_z80inst(&inst, context, 0, 1);
+ mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, pc), opts->gen.scratch1, SZ_W);
+ add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W);
+ call(code, opts->native_addr);
+ jmp_r(code, opts->gen.scratch1);
+ }
+ return context->interp_code[opcode];
+}
+
+code_info z80_make_interp_stub(z80_context * context, uint16_t address)
+{
+ z80_options *opts = context->options;
+ code_info * code = &opts->gen.code;
+ check_alloc_code(code, 32);
+ code_info stub = {code->cur, NULL};
+ //TODO: make this play well with the breakpoint code
+ mov_ir(code, address, opts->gen.scratch1, SZ_W);
+ call(code, opts->read_8);
+ //normal opcode fetch is already factored into instruction timing
+ //back out the base 3 cycles from a read here
+ //not quite perfect, but it will have to do for now
+ cycles(&opts->gen, -3);
+ check_cycles_int(&opts->gen, address);
+ call(code, opts->gen.save_context);
+ mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W);
+ push_r(code, opts->gen.context_reg);
+ call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.scratch2);
+ mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR);
+ pop_r(code, opts->gen.context_reg);
+ call(code, opts->gen.load_context);
+ jmp_r(code, opts->gen.scratch1);
+ stub.last = code->cur;
+ return stub;
+}
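
z80_interp_handler above generates one native stub per opcode lazily and caches it in context->interp_code, so the slow path is paid at most once per opcode value. Stripped of the code generation, the caching shape is just a lazily filled 256-entry table; here is a hedged sketch with plain function pointers standing in for the emitted native code.

#include <stdint.h>

typedef void (*op_handler)(void);

static op_handler interp_cache[256];

static void placeholder_handler(void)
{
	/* stands in for the native code emitted by translate_z80inst */
}

static op_handler generate_handler(uint8_t opcode)
{
	(void)opcode;                  /* the real code decodes and translates the opcode */
	return placeholder_handler;
}

op_handler get_interp_handler(uint8_t opcode)
{
	if (!interp_cache[opcode]) {
		interp_cache[opcode] = generate_handler(opcode);
	}
	return interp_cache[opcode];
}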
+
+
uint8_t * z80_get_native_address(z80_context * context, uint32_t address)
{
native_map_slot *map;
if (address < 0x4000) {
address &= 0x1FFF;
map = context->static_code_map;
- } else if (address >= 0x8000) {
- address &= 0x7FFF;
- map = context->banked_code_map + context->bank_reg;
} else {
- //dprintf("z80_get_native_address: %X NULL\n", address);
- return NULL;
+ address -= 0x4000;
+ map = context->banked_code_map;
}
if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) {
//dprintf("z80_get_native_address: %X NULL\n", address);
@@ -1676,34 +1701,34 @@ uint8_t * z80_get_native_address(z80_context * context, uint32_t address)
return map->base + map->offsets[address];
}
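
The native-address lookup now distinguishes only two regions: the mirrored 8KB of Z80 RAM below 0x4000 and a single banked map whose offset table spans the remaining 0xC000 addresses (allocated with that size further down). A standalone sketch of just the index arithmetic, using hypothetical minimal types in place of native_map_slot:

#include <stdint.h>
#include <stddef.h>

typedef struct {
	uint8_t *base;     /* start of the translated-code block            */
	int32_t *offsets;  /* per-address offset, -1 when not yet translated */
} map_slot;

uint8_t *lookup_native(map_slot *static_map, map_slot *banked_map, uint32_t address)
{
	map_slot *map;
	if (address < 0x4000) {
		address &= 0x1FFF;   /* 8KB of RAM mirrored through the low 16KB     */
		map = static_map;
	} else {
		address -= 0x4000;   /* one offsets[0xC000] table for 0x4000-0xFFFF  */
		map = banked_map;
	}
	if (!map->base || !map->offsets || map->offsets[address] == -1) {
		return NULL;
	}
	return map->base + map->offsets[address];
}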
-uint8_t z80_get_native_inst_size(x86_z80_options * opts, uint32_t address)
+uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address)
{
+ //TODO: Fix for addresses >= 0x4000
if (address >= 0x4000) {
return 0;
}
- return opts->ram_inst_sizes[address & 0x1FFF];
+ return opts->gen.ram_inst_sizes[0][address & 0x1FFF];
}
void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size)
{
uint32_t orig_address = address;
native_map_slot *map;
- x86_z80_options * opts = context->options;
+ z80_options * opts = context->options;
if (address < 0x4000) {
address &= 0x1FFF;
map = context->static_code_map;
- opts->ram_inst_sizes[address] = native_size;
+ opts->gen.ram_inst_sizes[0][address] = native_size;
context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7);
context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7);
- } else if (address >= 0x8000) {
- address &= 0x7FFF;
- map = context->banked_code_map + context->bank_reg;
+ } else {
+ //banked region: a single map now covers 0x4000-0xFFFF
+ address -= 0x4000;
+ map = context->banked_code_map;
if (!map->offsets) {
- map->offsets = malloc(sizeof(int32_t) * 0x8000);
- memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000);
+ map->offsets = malloc(sizeof(int32_t) * 0xC000);
+ memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
}
- } else {
- return;
}
if (!map->base) {
map->base = native_address;
@@ -1714,15 +1739,13 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n
if (address < 0x4000) {
address &= 0x1FFF;
map = context->static_code_map;
- } else if (address >= 0x8000) {
- address &= 0x7FFF;
- map = context->banked_code_map + context->bank_reg;
} else {
- return;
+ address -= 0x4000;
+ map = context->banked_code_map;
}
if (!map->offsets) {
- map->offsets = malloc(sizeof(int32_t) * 0x8000);
- memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000);
+ map->offsets = malloc(sizeof(int32_t) * 0xC000);
+ memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000);
}
map->offsets[address] = EXTENSION_WORD;
}
@@ -1732,6 +1755,7 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n
uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address)
{
+ //TODO: Fixme for address >= 0x4000
if (!static_code_map->base || address >= 0x4000) {
return INVALID_INSTRUCTION_START;
}
@@ -1750,10 +1774,12 @@ z80_context * z80_handle_code_write(uint32_t address, z80_context * context)
{
uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address);
if (inst_start != INVALID_INSTRUCTION_START) {
- uint8_t * dst = z80_get_native_address(context, inst_start);
- dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address);
- dst = mov_ir(dst, inst_start, SCRATCH1, SZ_D);
- dst = call(dst, (uint8_t *)z80_retrans_stub);
+ code_ptr dst = z80_get_native_address(context, inst_start);
+ code_info code = {dst, dst+16};
+ z80_options * opts = context->options;
+ dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address);
+ mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D);
+ call(&code, opts->retrans_stub);
}
return context;
}
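
z80_map_native_address marks which 128-byte blocks of Z80 RAM contain translated code (the ram_code_flags bit math above), so a RAM write only has to reach z80_handle_code_write when its block's bit is set. The write-path check is generated by backend code that is not part of this diff, but the flag arithmetic itself is simple enough to sketch standalone:

#include <stdint.h>

/* one flag bit per 128-byte block; 8KB of RAM -> 64 bits -> 8 flag bytes */
static uint8_t ram_code_flags[(8 * 1024) / 128 / 8];

void mark_block_has_code(uint16_t address)   /* address in 0x0000-0x1FFF */
{
	ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7);
}

int block_has_code(uint16_t address)
{
	return ram_code_flags[(address & 0x1C00) >> 10] & (1 << ((address & 0x380) >> 7));
}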
@@ -1773,10 +1799,10 @@ uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address)
void z80_handle_deferred(z80_context * context)
{
- x86_z80_options * opts = context->options;
- process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address);
- if (opts->deferred) {
- translate_z80_stream(context, opts->deferred->address);
+ z80_options * opts = context->options;
+ process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
+ if (opts->gen.deferred) {
+ translate_z80_stream(context, opts->gen.deferred->address);
}
}
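
defer_address and process_deferred come from backend.c and are not shown in this diff; they implement the usual forward-reference list. When a CALL/RST target has not been translated yet, the patch location is recorded, and once the target exists the recorded displacement is filled in. An illustrative sketch of that pattern with hypothetical minimal types, assuming a rel32 jump/call displacement:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct deferred {
	uint32_t        address;  /* Z80 address the jump wants to reach     */
	uint8_t         *dest;    /* where the 4-byte displacement must go   */
	struct deferred *next;
} deferred;

deferred *defer(deferred *head, uint32_t address, uint8_t *dest)
{
	deferred *d = malloc(sizeof(deferred));
	d->address = address;
	d->dest = dest;
	d->next = head;
	return d;
}

void resolve(deferred **head, uint8_t *(*lookup)(uint32_t address))
{
	deferred **cur = head;
	while (*cur) {
		uint8_t *native = lookup((*cur)->address);
		if (native) {
			int32_t rel = (int32_t)(native - ((*cur)->dest + 4));
			memcpy((*cur)->dest, &rel, sizeof(rel)); /* patch the rel32 field */
			deferred *done = *cur;
			*cur = done->next;
			free(done);
		} else {
			cur = &(*cur)->next;   /* target not translated yet; keep the entry */
		}
	}
}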
@@ -1784,13 +1810,10 @@ extern void * z80_retranslate_inst(uint32_t address, z80_context * context, uint
void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start)
{
char disbuf[80];
- x86_z80_options * opts = context->options;
+ z80_options * opts = context->options;
uint8_t orig_size = z80_get_native_inst_size(opts, address);
- uint32_t orig = address;
- address &= 0x1FFF;
- uint8_t * dst = opts->cur_code;
- uint8_t * dst_end = opts->code_end;
- uint8_t *after, *inst = context->mem_pointers[0] + address;
+ code_info *code = &opts->gen.code;
+ uint8_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
z80inst instbuf;
dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start);
after = z80_decode(inst, &instbuf);
@@ -1803,19 +1826,16 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o
}
#endif
if (orig_size != ZMAX_NATIVE_SIZE) {
- if (dst_end - dst < ZMAX_NATIVE_SIZE) {
- size_t size = 1024*1024;
- dst = alloc_code(&size);
- opts->code_end = dst_end = dst + size;
- opts->cur_code = dst;
- }
- deferred_addr * orig_deferred = opts->deferred;
- uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address);
+ check_alloc_code(code, ZMAX_NATIVE_SIZE);
+ code_ptr start = code->cur;
+ deferred_addr * orig_deferred = opts->gen.deferred;
+ translate_z80inst(&instbuf, context, address, 0);
+ /*
if ((native_end - dst) <= orig_size) {
uint8_t * native_next = z80_get_native_address(context, address + after-inst);
if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) {
- remove_deferred_until(&opts->deferred, orig_deferred);
- native_end = translate_z80inst(&instbuf, orig_start, context, address);
+ remove_deferred_until(&opts->gen.deferred, orig_deferred);
+ native_end = translate_z80inst(&instbuf, orig_start, context, address, 0);
if (native_next == orig_start + orig_size && (native_next-native_end) < 2) {
while (native_end < orig_start + orig_size) {
*(native_end++) = 0x90; //NOP
@@ -1826,19 +1846,27 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o
z80_handle_deferred(context);
return orig_start;
}
- }
- z80_map_native_address(context, address, dst, after-inst, ZMAX_NATIVE_SIZE);
- opts->cur_code = dst+ZMAX_NATIVE_SIZE;
- jmp(orig_start, dst);
+ }*/
+ z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE);
+ code_info tmp_code = {orig_start, orig_start + 16};
+ jmp(&tmp_code, start);
+ tmp_code = *code;
+ code->cur = start + ZMAX_NATIVE_SIZE;
if (!z80_is_terminal(&instbuf)) {
- jmp(native_end, z80_get_native_address_trans(context, address + after-inst));
+ jmp(&tmp_code, z80_get_native_address_trans(context, address + after-inst));
}
z80_handle_deferred(context);
- return dst;
+ return start;
} else {
- dst = translate_z80inst(&instbuf, orig_start, context, address);
+ code_info tmp_code = *code;
+ code->cur = orig_start;
+ code->last = orig_start + ZMAX_NATIVE_SIZE;
+ translate_z80inst(&instbuf, context, address, 0);
+ code_info tmp2 = *code;
+ *code = tmp_code;
if (!z80_is_terminal(&instbuf)) {
- dst = jmp(dst, z80_get_native_address_trans(context, address + after-inst));
+ jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst));
}
z80_handle_deferred(context);
return orig_start;
@@ -1851,41 +1879,28 @@ void translate_z80_stream(z80_context * context, uint32_t address)
if (z80_get_native_address(context, address)) {
return;
}
- x86_z80_options * opts = context->options;
+ z80_options * opts = context->options;
uint32_t start_address = address;
- uint8_t * encoded = NULL, *next;
- if (address < 0x4000) {
- encoded = context->mem_pointers[0] + (address & 0x1FFF);
- } else if(address >= 0x8000 && context->mem_pointers[1]) {
- printf("attempt to translate Z80 code from banked area at address %X\n", address);
- exit(1);
- //encoded = context->mem_pointers[1] + (address & 0x7FFF);
- }
- while (encoded != NULL)
+
+ do
{
z80inst inst;
dprintf("translating Z80 code at address %X\n", address);
do {
- if (opts->code_end-opts->cur_code < ZMAX_NATIVE_SIZE) {
- if (opts->code_end-opts->cur_code < 5) {
- puts("out of code memory, not enough space for jmp to next chunk");
- exit(1);
- }
- size_t size = 1024*1024;
- opts->cur_code = alloc_code(&size);
- opts->code_end = opts->cur_code + size;
- jmp(opts->cur_code, opts->cur_code);
- }
- if (address > 0x4000 && address < 0x8000) {
- opts->cur_code = xor_rr(opts->cur_code, RDI, RDI, SZ_D);
- opts->cur_code = call(opts->cur_code, (uint8_t *)exit);
- break;
- }
uint8_t * existing = z80_get_native_address(context, address);
if (existing) {
- opts->cur_code = jmp(opts->cur_code, existing);
+ jmp(&opts->gen.code, existing);
+ break;
+ }
+ uint8_t * encoded, *next;
+ encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen);
+ if (!encoded) {
+ code_info stub = z80_make_interp_stub(context, address);
+ z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur);
break;
}
+ //make sure prologue is in a contiguous chunk of code
+ check_code_prologue(&opts->gen.code);
next = z80_decode(encoded, &inst);
#ifdef DO_DEBUG_PRINT
z80_disasm(&inst, disbuf, address);
@@ -1895,38 +1910,37 @@ void translate_z80_stream(z80_context * context, uint32_t address)
printf("%X\t%s\n", address, disbuf);
}
#endif
- uint8_t *after = translate_z80inst(&inst, opts->cur_code, context, address);
- z80_map_native_address(context, address, opts->cur_code, next-encoded, after - opts->cur_code);
- opts->cur_code = after;
+ code_ptr start = opts->gen.code.cur;
+ translate_z80inst(&inst, context, address, 0);
+ z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start);
address += next-encoded;
- if (address > 0xFFFF) {
address &= 0xFFFF;
-
- } else {
- encoded = next;
- }
} while (!z80_is_terminal(&inst));
- process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address);
- if (opts->deferred) {
- address = opts->deferred->address;
+ process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address);
+ if (opts->gen.deferred) {
+ address = opts->gen.deferred->address;
dprintf("defferred address: %X\n", address);
- if (address < 0x4000) {
- encoded = context->mem_pointers[0] + (address & 0x1FFF);
- } else if (address > 0x8000 && context->mem_pointers[1]) {
- encoded = context->mem_pointers[1] + (address & 0x7FFF);
- } else {
- printf("attempt to translate non-memory address: %X\n", address);
- exit(1);
}
- } else {
- encoded = NULL;
- }
- }
+ } while (opts->gen.deferred);
}
-void init_x86_z80_opts(x86_z80_options * options)
+void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, uint32_t clock_divider)
{
+ memset(options, 0, sizeof(*options));
+
+ options->gen.memmap = chunks;
+ options->gen.memmap_chunks = num_chunks;
+ options->gen.address_size = SZ_W;
+ options->gen.address_mask = 0xFFFF;
+ options->gen.max_address = 0x10000;
+ options->gen.bus_cycles = 3;
+ options->gen.clock_divider = clock_divider;
+ options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers);
+ options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags);
+ options->gen.ram_flags_shift = 7;
+
options->flags = 0;
+#ifdef X86_64
options->regs[Z80_B] = BH;
options->regs[Z80_C] = RBX;
options->regs[Z80_D] = CH;
@@ -1947,90 +1961,457 @@ void init_x86_z80_opts(x86_z80_options * options)
options->regs[Z80_AF] = -1;
options->regs[Z80_IX] = RDX;
options->regs[Z80_IY] = R8;
- size_t size = 1024 * 1024;
- options->cur_code = alloc_code(&size);
- options->code_end = options->cur_code + size;
- options->ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000);
- memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000);
- options->deferred = NULL;
+
+ options->gen.scratch1 = R13;
+ options->gen.scratch2 = R14;
+#else
+ memset(options->regs, -1, sizeof(options->regs));
+ options->regs[Z80_A] = RAX;
+ options->regs[Z80_SP] = RBX;
+
+ options->gen.scratch1 = RCX;
+ options->gen.scratch2 = RDX;
+#endif
+
+ options->gen.context_reg = RSI;
+ options->gen.cycles = RBP;
+ options->gen.limit = RDI;
+
+ options->gen.native_code_map = malloc(sizeof(native_map_slot));
+ memset(options->gen.native_code_map, 0, sizeof(native_map_slot));
+ options->gen.deferred = NULL;
+ options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *));
+ options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1);
+ memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000);
+
+ code_info *code = &options->gen.code;
+ init_code_info(code);
+
+ options->save_context_scratch = code->cur;
+ mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W);
+ mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W);
+
+ options->gen.save_context = code->cur;
+ for (int i = 0; i <= Z80_A; i++)
+ {
+ int reg;
+ uint8_t size;
+ if (i < Z80_I) {
+ reg = i / 2 + Z80_BC + (i > Z80_H ? 2 : 0);
+ size = SZ_W;
+ } else {
+ reg = i;
+ size = SZ_B;
+ }
+ if (options->regs[reg] >= 0) {
+ mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size);
+ }
+ if (size == SZ_W) {
+ i++;
+ }
+ }
+ if (options->regs[Z80_SP] >= 0) {
+ mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W);
+ }
+ mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D);
+ mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D);
+ retn(code);
+
+ options->load_context_scratch = code->cur;
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W);
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W);
+ options->gen.load_context = code->cur;
+ for (int i = 0; i <= Z80_A; i++)
+ {
+ int reg;
+ uint8_t size;
+ if (i < Z80_I) {
+ reg = i / 2 + Z80_BC + (i > Z80_H ? 2 : 0);
+ size = SZ_W;
+ } else {
+ reg = i;
+ size = SZ_B;
+ }
+ if (options->regs[reg] >= 0) {
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size);
+ }
+ if (size == SZ_W) {
+ i++;
+ }
+ }
+ if (options->regs[Z80_SP] >= 0) {
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W);
+ }
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D);
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D);
+ retn(code);
+
+ options->native_addr = code->cur;
+ call(code, options->gen.save_context);
+ push_r(code, options->gen.context_reg);
+ movzx_rr(code, options->gen.scratch1, options->gen.scratch1, SZ_W, SZ_D);
+ call_args(code, (code_ptr)z80_get_native_address_trans, 2, options->gen.context_reg, options->gen.scratch1);
+ mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
+ pop_r(code, options->gen.context_reg);
+ call(code, options->gen.load_context);
+ retn(code);
+
+ options->gen.handle_cycle_limit = code->cur;
+ cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
+ code_ptr no_sync = code->cur+1;
+ jcc(code, CC_B, no_sync);
+ mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W);
+ call(code, options->save_context_scratch);
+ pop_r(code, RAX); //return address in read/write func
+ pop_r(code, RBX); //return address in translated code
+ sub_ir(code, 5, RAX, SZ_PTR); //adjust return address to point to the call that got us here
+ mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
+ mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR);
+ restore_callee_save_regs(code);
+ *no_sync = code->cur - (no_sync + 1);
+ //return to caller of z80_run
+ retn(code);
+
+ options->gen.handle_code_write = (code_ptr)z80_handle_code_write;
+
+ options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc);
+ options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc);
+
+ options->gen.handle_cycle_limit_int = code->cur;
+ cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D);
+ code_ptr skip_int = code->cur+1;
+ jcc(code, CC_B, skip_int);
+ //set limit to the cycle limit
+ mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D);
+ //disable interrupts
+ mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B);
+ mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B);
+ cycles(&options->gen, 7);
+ //save return address (in scratch1) to Z80 stack
+ sub_ir(code, 2, options->regs[Z80_SP], SZ_W);
+ mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
+ //we need to do check_cycles and cycles outside of the write_8 call
+ //so that the stack has the correct depth if we need to return to C
+ //for a synchronization
+ check_cycles(&options->gen);
+ cycles(&options->gen, 3);
+ //save word to write before call to write_8_noinc
+ push_r(code, options->gen.scratch1);
+ call(code, options->write_8_noinc);
+ //restore word to write
+ pop_r(code, options->gen.scratch1);
+ //write high byte to SP+1
+ mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W);
+ add_ir(code, 1, options->gen.scratch2, SZ_W);
+ shr_ir(code, 8, options->gen.scratch1, SZ_W);
+ check_cycles(&options->gen);
+ cycles(&options->gen, 3);
+ call(code, options->write_8_noinc);
+ //dispose of return address as we'll be jumping somewhere else
+ pop_r(code, options->gen.scratch2);
+ //TODO: Support interrupt mode 0 and 2
+ mov_ir(code, 0x38, options->gen.scratch1, SZ_W);
+ call(code, options->native_addr);
+ mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR);
+ restore_callee_save_regs(code);
+ //return to caller of z80_run to sync
+ retn(code);
+ *skip_int = code->cur - (skip_int+1);
+ cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D);
+ code_ptr skip_sync = code->cur + 1;
+ jcc(code, CC_B, skip_sync);
+ options->do_sync = code->cur;
+ call(code, options->gen.save_context);
+ pop_rind(code, options->gen.context_reg);
+ //restore callee saved registers
+ restore_callee_save_regs(code);
+ //return to caller of z80_run
+ *skip_sync = code->cur - (skip_sync+1);
+ retn(code);
+
+ options->read_io = code->cur;
+ check_cycles(&options->gen);
+ cycles(&options->gen, 4);
+ //Genesis has no IO hardware and always returns FF
+ //eventually this should use a second memory map array
+ mov_ir(code, 0xFF, options->gen.scratch1, SZ_B);
+ retn(code);
+
+ options->write_io = code->cur;
+ check_cycles(&options->gen);
+ cycles(&options->gen, 4);
+ retn(code);
+
+ options->read_16 = code->cur;
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ //TODO: figure out how to handle the extra wait state for word reads to bank area
+ //may also need special handling to avoid too much stack depth when access is blocked
+ push_r(code, options->gen.scratch1);
+ call(code, options->read_8_noinc);
+ mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B);
+ pop_r(code, options->gen.scratch1);
+ add_ir(code, 1, options->gen.scratch1, SZ_W);
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ call(code, options->read_8_noinc);
+ shl_ir(code, 8, options->gen.scratch1, SZ_W);
+ mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B);
+ retn(code);
+
+ options->write_16_highfirst = code->cur;
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ push_r(code, options->gen.scratch2);
+ push_r(code, options->gen.scratch1);
+ add_ir(code, 1, options->gen.scratch2, SZ_W);
+ shr_ir(code, 8, options->gen.scratch1, SZ_W);
+ call(code, options->write_8_noinc);
+ pop_r(code, options->gen.scratch1);
+ pop_r(code, options->gen.scratch2);
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ //TODO: Check if we can get away with TCO here
+ call(code, options->write_8_noinc);
+ retn(code);
+
+ options->write_16_lowfirst = code->cur;
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ push_r(code, options->gen.scratch2);
+ push_r(code, options->gen.scratch1);
+ call(code, options->write_8_noinc);
+ pop_r(code, options->gen.scratch1);
+ pop_r(code, options->gen.scratch2);
+ add_ir(code, 1, options->gen.scratch2, SZ_W);
+ shr_ir(code, 8, options->gen.scratch1, SZ_W);
+ cycles(&options->gen, 3);
+ check_cycles(&options->gen);
+ //TODO: Check if we can get away with TCO here
+ call(code, options->write_8_noinc);
+ retn(code);
+
+ options->retrans_stub = code->cur;
+ //pop return address
+ pop_r(code, options->gen.scratch2);
+ call(code, options->gen.save_context);
+ //adjust pointer before move and call instructions that got us here
+ sub_ir(code, 11, options->gen.scratch2, SZ_PTR);
+ push_r(code, options->gen.context_reg);
+ call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2);
+ pop_r(code, options->gen.context_reg);
+ mov_rr(code, RAX, options->gen.scratch1, SZ_PTR);
+ call(code, options->gen.load_context);
+ jmp_r(code, options->gen.scratch1);
+
+ options->run = (z80_run_fun)code->cur;
+ save_callee_save_regs(code);
+ mov_rr(code, RDI, options->gen.context_reg, SZ_PTR);
+ call(code, options->load_context_scratch);
+ cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
+ code_ptr no_extra = code->cur+1;
+ jcc(code, CC_Z, no_extra);
+ push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc));
+ mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR);
+ *no_extra = code->cur - (no_extra + 1);
+ jmp_rind(code, options->gen.context_reg);
}
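
The generated read_16 helper above composes two byte accesses because the Z80 bus is 8 bits wide: it fetches the low byte first, then the byte at address+1 into the high half, charging 3 T-states per access. In C terms the data flow is just the following; read_8 here is a hypothetical stand-in for the generated read_8_noinc handler, and the cycle accounting is omitted.

#include <stdint.h>

static uint8_t fake_ram[0x10000];

/* stand-in for the generated read_8_noinc handler */
static uint8_t read_8(uint16_t address)
{
	return fake_ram[address];
}

/* low byte first, then high byte from address+1, like the generated read_16 */
uint16_t read_16(uint16_t address)
{
	uint16_t value = read_8(address);
	value |= (uint16_t)read_8((uint16_t)(address + 1)) << 8;
	return value;
}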
-void init_z80_context(z80_context * context, x86_z80_options * options)
+void init_z80_context(z80_context * context, z80_options * options)
{
memset(context, 0, sizeof(*context));
context->static_code_map = malloc(sizeof(*context->static_code_map));
context->static_code_map->base = NULL;
context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000);
memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000);
- context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9));
- memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9));
+ context->banked_code_map = malloc(sizeof(native_map_slot));
+ memset(context->banked_code_map, 0, sizeof(native_map_slot));
context->options = options;
+ context->int_cycle = CYCLE_NEVER;
+ context->int_pulse_start = CYCLE_NEVER;
+ context->int_pulse_end = CYCLE_NEVER;
}
-void z80_reset(z80_context * context)
+void z80_run(z80_context * context, uint32_t target_cycle)
{
+ if (context->reset || context->busack) {
+ context->current_cycle = target_cycle;
+ } else {
+ if (context->current_cycle < target_cycle) {
+ //busreq is sampled at the end of an m-cycle
+ //we can approximate that by running for a single m-cycle after a bus request
+ context->sync_cycle = context->busreq ? context->current_cycle + 3*context->options->gen.clock_divider : target_cycle;
+ if (!context->native_pc) {
+ context->native_pc = z80_get_native_address_trans(context, context->pc);
+ }
+ while (context->current_cycle < context->sync_cycle)
+ {
+ if (context->int_pulse_end < context->current_cycle || context->int_pulse_end == CYCLE_NEVER) {
+ z80_next_int_pulse(context);
+ }
+ if (context->iff1) {
+ context->int_cycle = context->int_pulse_start < context->int_enable_cycle ? context->int_enable_cycle : context->int_pulse_start;
+ } else {
+ context->int_cycle = CYCLE_NEVER;
+ }
+ context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle;
+ dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d (%d - %d)\n", context->current_cycle, context->sync_cycle, context->int_cycle, context->int_pulse_start, context->int_pulse_end);
+ context->options->run(context);
+ dprintf("Z80 ran to cycle %d\n", context->current_cycle);
+ }
+ if (context->busreq) {
+ context->busack = 1;
+ context->current_cycle = target_cycle;
+ }
+ }
+ }
+}
+
+void z80_assert_reset(z80_context * context, uint32_t cycle)
+{
+ z80_run(context, cycle);
+ context->reset = 1;
+}
+
+void z80_clear_reset(z80_context * context, uint32_t cycle)
+{
+ z80_run(context, cycle);
+ if (context->reset) {
+ //TODO: Handle case where reset is not asserted long enough
context->im = 0;
context->iff1 = context->iff2 = 0;
- context->native_pc = z80_get_native_address_trans(context, 0);
+ context->native_pc = NULL;
context->extra_pc = NULL;
+ context->pc = 0;
+ context->reset = 0;
+ if (context->busreq) {
+ //TODO: Figure out appropriate delay
+ context->busack = 1;
+ }
+ }
}
-void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
+void z80_assert_busreq(z80_context * context, uint32_t cycle)
+{
+ z80_run(context, cycle);
+ context->busreq = 1;
+}
+
+void z80_clear_busreq(z80_context * context, uint32_t cycle)
{
- static uint8_t * bp_stub = NULL;
- uint8_t * native = z80_get_native_address_trans(context, address);
- uint8_t * start_native = native;
- native = mov_ir(native, address, SCRATCH1, SZ_W);
- if (!bp_stub) {
- x86_z80_options * opts = context->options;
- uint8_t * dst = opts->cur_code;
- uint8_t * dst_end = opts->code_end;
- if (dst_end - dst < 128) {
- size_t size = 1024*1024;
- dst = alloc_code(&size);
- opts->code_end = dst_end = dst + size;
- }
- bp_stub = dst;
- native = call(native, bp_stub);
+ z80_run(context, cycle);
+ context->busreq = 0;
+ context->busack = 0;
+}
+
+uint8_t z80_get_busack(z80_context * context, uint32_t cycle)
+{
+ z80_run(context, cycle);
+ return context->busack;
+}
+
+void z80_adjust_cycles(z80_context * context, uint32_t deduction)
+{
+ if (context->current_cycle < deduction) {
+ fprintf(stderr, "WARNING: Deduction of %u cycles when Z80 cycle counter is only %u\n", deduction, context->current_cycle);
+ context->current_cycle = 0;
+ } else {
+ context->current_cycle -= deduction;
+ }
+ if (context->int_enable_cycle != CYCLE_NEVER) {
+ if (context->int_enable_cycle < deduction) {
+ context->int_enable_cycle = 0;
+ } else {
+ context->int_enable_cycle -= deduction;
+ }
+ }
+ if (context->int_pulse_start != CYCLE_NEVER) {
+ if (context->int_pulse_end < deduction) {
+ context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
+ } else {
+ context->int_pulse_end -= deduction;
+ if (context->int_pulse_start < deduction) {
+ context->int_pulse_start = 0;
+ } else {
+ context->int_pulse_start -= deduction;
+ }
+ }
+ }
+}
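
Together with z80_run, the new bus-control entry points give system code a cycle-stamped interface: each call first advances the core to the given cycle, then applies the signal change. A hedged usage sketch follows; the cycle numbers are illustrative rather than Genesis-accurate, and the real callers live in system code outside this diff.

#include "z80_to_x86.h"

void example_bus_request(z80_context *z80)
{
	z80_assert_busreq(z80, 1000);       /* bus requested at cycle 1000             */
	if (z80_get_busack(z80, 1100)) {    /* runs the core, then samples BUSACK      */
		/* ... host side would access Z80 RAM here ... */
		z80_clear_busreq(z80, 1200);
	}
	z80_run(z80, 2000);                 /* finish the timeslice                    */
	z80_adjust_cycles(z80, 2000);       /* rebase counters, e.g. once per frame    */
}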
+
+uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst)
+{
+ code_info code = {dst, dst+16};
+ mov_ir(&code, address, context->options->gen.scratch1, SZ_W);
+ call(&code, context->bp_stub);
+ return code.cur-dst;
+}
+
+void zcreate_stub(z80_context * context)
+{
+ z80_options * opts = context->options;
+ code_info *code = &opts->gen.code;
+ check_code_prologue(code);
+ context->bp_stub = code->cur;
//Calculate length of prologue
- dst = z80_check_cycles_int(dst, address);
- int check_int_size = dst-bp_stub;
- dst = bp_stub;
+ check_cycles_int(&opts->gen, 0);
+ int check_int_size = code->cur-context->bp_stub;
+ code->cur = context->bp_stub;
+
+ //Calculate length of patch
+ int patch_size = zbreakpoint_patch(context, 0, code->cur);
//Save context and call breakpoint handler
- dst = call(dst, (uint8_t *)z80_save_context);
- dst = push_r(dst, SCRATCH1);
- dst = mov_rr(dst, CONTEXT, RDI, SZ_Q);
- dst = mov_rr(dst, SCRATCH1, RSI, SZ_W);
- dst = call(dst, bp_handler);
- dst = mov_rr(dst, RAX, CONTEXT, SZ_Q);
+ call(code, opts->gen.save_context);
+ push_r(code, opts->gen.scratch1);
+ call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1);
+ mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR);
//Restore context
- dst = call(dst, (uint8_t *)z80_load_context);
- dst = pop_r(dst, SCRATCH1);
+ call(code, opts->gen.load_context);
+ pop_r(code, opts->gen.scratch1);
//do prologue stuff
- dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D);
- uint8_t * jmp_off = dst+1;
- dst = jcc(dst, CC_NC, dst + 7);
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q);
- dst = push_r(dst, SCRATCH1);
- dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int);
- *jmp_off = dst - (jmp_off+1);
+ cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D);
+ uint8_t * jmp_off = code->cur+1;
+ jcc(code, CC_NC, code->cur + 7);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
+ push_r(code, opts->gen.scratch1);
+ jmp(code, opts->gen.handle_cycle_limit_int);
+ *jmp_off = code->cur - (jmp_off+1);
//jump back to body of translated instruction
- dst = pop_r(dst, SCRATCH1);
- dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q);
- dst = jmp_r(dst, SCRATCH1);
- opts->cur_code = dst;
- } else {
- native = call(native, bp_stub);
+ pop_r(code, opts->gen.scratch1);
+ add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR);
+ jmp_r(code, opts->gen.scratch1);
+}
+
+void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler)
+{
+ context->bp_handler = bp_handler;
+ uint8_t bit = 1 << (address % 8);
+ if (!(bit & context->breakpoint_flags[address / 8])) {
+ context->breakpoint_flags[address / 8] |= bit;
+ if (!context->bp_stub) {
+ zcreate_stub(context);
+ }
+ uint8_t * native = z80_get_native_address(context, address);
+ if (native) {
+ zbreakpoint_patch(context, address, native);
+ }
}
}
void zremove_breakpoint(z80_context * context, uint16_t address)
{
+ context->breakpoint_flags[address / 8] &= ~(1 << (address % 8));
uint8_t * native = z80_get_native_address(context, address);
- z80_check_cycles_int(native, address);
+ if (native) {
+ z80_options * opts = context->options;
+ code_info tmp_code = opts->gen.code;
+ opts->gen.code.cur = native;
+ opts->gen.code.last = native + 16;
+ check_cycles_int(&opts->gen, address);
+ opts->gen.code = tmp_code;
+ }
}
-
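
The breakpoint stub above passes the context and the Z80 address to bp_handler through call_args_abi and reloads the context pointer from RAX, so the handler is expected to have a (context, address) to context shape; the actual debugger handler lives in debug.c and is not part of this diff. A hedged usage sketch with a hypothetical handler:

#include <stdio.h>
#include "z80_to_x86.h"

/* handler shape inferred from the generated stub: updated context returned in RAX */
void *example_bp_handler(z80_context * context, uint16_t address)
{
	printf("Z80 breakpoint hit at %X on cycle %u\n", address, context->current_cycle);
	return context;
}

void install_example_breakpoint(z80_context * context)
{
	zinsert_breakpoint(context, 0x38, (uint8_t *)example_bp_handler);
	/* ... later ... */
	zremove_breakpoint(context, 0x38);
}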
diff --git a/z80_to_x86.h b/z80_to_x86.h
index 48cc942..fefa836 100644
--- a/z80_to_x86.h
+++ b/z80_to_x86.h
@@ -21,14 +21,29 @@ enum {
ZF_NUM
};
+typedef void (*z80_run_fun)(void * context);
+
typedef struct {
- uint8_t * cur_code;
- uint8_t * code_end;
- uint8_t *ram_inst_sizes;
- deferred_addr * deferred;
+ cpu_options gen;
+ code_ptr save_context_scratch;
+ code_ptr load_context_scratch;
+ code_ptr native_addr;
+ code_ptr retrans_stub;
+ code_ptr do_sync;
+ code_ptr read_8;
+ code_ptr write_8;
+ code_ptr read_8_noinc;
+ code_ptr write_8_noinc;
+ code_ptr read_16;
+ code_ptr write_16_highfirst;
+ code_ptr write_16_lowfirst;
+ code_ptr read_io;
+ code_ptr write_io;
+
uint32_t flags;
int8_t regs[Z80_UNUSED];
-} x86_z80_options;
+ z80_run_fun run;
+} z80_options;
typedef struct {
void * native_pc;
@@ -51,23 +66,40 @@ typedef struct {
uint32_t int_cycle;
native_map_slot * static_code_map;
native_map_slot * banked_code_map;
- void * options;
+ z80_options * options;
void * system;
uint8_t ram_code_flags[(8 * 1024)/128/8];
uint32_t int_enable_cycle;
uint16_t pc;
+ uint32_t int_pulse_start;
+ uint32_t int_pulse_end;
+ uint8_t breakpoint_flags[(64 * 1024)/8];
+ uint8_t * bp_handler;
+ uint8_t * bp_stub;
+ uint8_t * interp_code[256];
+ uint8_t reset;
+ uint8_t busreq;
+ uint8_t busack;
} z80_context;
void translate_z80_stream(z80_context * context, uint32_t address);
-void init_x86_z80_opts(x86_z80_options * options);
-void init_z80_context(z80_context * context, x86_z80_options * options);
-uint8_t * z80_get_native_address(z80_context * context, uint32_t address);
-extern uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address) asm("z80_get_native_address_trans");
-z80_context * z80_handle_code_write(uint32_t address, z80_context * context) asm("z80_handle_code_write");
-extern void z80_run(z80_context * context) asm("z80_run");
+void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, uint32_t clock_divider);
+void init_z80_context(z80_context * context, z80_options * options);
+code_ptr z80_get_native_address(z80_context * context, uint32_t address);
+code_ptr z80_get_native_address_trans(z80_context * context, uint32_t address);
+z80_context * z80_handle_code_write(uint32_t address, z80_context * context);
void z80_reset(z80_context * context);
void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler);
void zremove_breakpoint(z80_context * context, uint16_t address);
+void z80_run(z80_context * context, uint32_t target_cycle);
+void z80_assert_reset(z80_context * context, uint32_t cycle);
+void z80_clear_reset(z80_context * context, uint32_t cycle);
+void z80_assert_busreq(z80_context * context, uint32_t cycle);
+void z80_clear_busreq(z80_context * context, uint32_t cycle);
+uint8_t z80_get_busack(z80_context * context, uint32_t cycle);
+void z80_adjust_cycles(z80_context * context, uint32_t deduction);
+//to be provided by system code
+void z80_next_int_pulse(z80_context * z_context);
#endif //Z80_TO_X86_H_
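
z80_next_int_pulse is the one hook the header leaves to system code: it must schedule the next window during which /INT is asserted. ztestrun.c below simply reports that no pulse ever comes; a hypothetical system that raises the interrupt once per frame could look like the sketch here, where the frame length and pulse width are made-up numbers rather than values taken from this diff.

#include "z80_to_x86.h"

#define FRAME_CYCLES 59659   /* hypothetical frame length in Z80 cycles */
#define PULSE_CYCLES 171     /* hypothetical /INT pulse width           */

void z80_next_int_pulse(z80_context * z_context)
{
	/* schedule the next interrupt window strictly after the current cycle */
	uint32_t frame = z_context->current_cycle / FRAME_CYCLES + 1;
	z_context->int_pulse_start = frame * FRAME_CYCLES;
	z_context->int_pulse_end = z_context->int_pulse_start + PULSE_CYCLES;
}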
diff --git a/z80inst.c b/z80inst.c
index 0605ebc..cecce6e 100644
--- a/z80inst.c
+++ b/z80inst.c
@@ -1,6 +1,6 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "z80inst.h"
@@ -433,7 +433,7 @@ z80inst z80_tbl_extd[0xC0-0x40] = {
{op, Z80_L, Z80_UNUSED, Z80_UNUSED, 1},\
{op, Z80_UNUSED, Z80_REG_INDIRECT, Z80_HL, 1},\
{op, Z80_A, Z80_UNUSED, Z80_UNUSED, 1}
-
+
#define BIT_BLOCK(op, bit) \
{op, Z80_USE_IMMED, Z80_REG, Z80_B, bit},\
{op, Z80_USE_IMMED, Z80_REG, Z80_C, bit},\
@@ -771,14 +771,14 @@ z80inst z80_tbl_ix[256] = {
};
#define SHIFT_BLOCK_IX(op) \
- {op, Z80_B, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_C, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_D, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_E, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_H, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_L, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_UNUSED, Z80_IX_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_A, Z80_IX_DISPLACE | Z80_DIR, 0, 0}
+ {op, Z80_B, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_C, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_D, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_E, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_H, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_L, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_UNUSED, Z80_IX_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_A, Z80_IX_DISPLACE | Z80_DIR, 0, 1}
#define BIT_BLOCK_IX(bit) \
{Z80_BIT, Z80_USE_IMMED, Z80_IX_DISPLACE, 0, bit},\
@@ -1129,14 +1129,14 @@ z80inst z80_tbl_iy[256] = {
};
#define SHIFT_BLOCK_IY(op) \
- {op, Z80_B, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_C, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_D, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_E, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_H, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_L, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_UNUSED, Z80_IY_DISPLACE | Z80_DIR, 0, 0},\
- {op, Z80_A, Z80_IY_DISPLACE | Z80_DIR, 0, 0}
+ {op, Z80_B, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_C, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_D, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_E, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_H, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_L, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_UNUSED, Z80_IY_DISPLACE | Z80_DIR, 0, 1},\
+ {op, Z80_A, Z80_IY_DISPLACE | Z80_DIR, 0, 1}
#define BIT_BLOCK_IY(bit) \
{Z80_BIT, Z80_USE_IMMED, Z80_IY_DISPLACE, 0, bit},\
@@ -1250,7 +1250,7 @@ uint8_t * z80_decode(uint8_t * istream, z80inst * decoded)
}
} else {
memcpy(decoded, z80_tbl_a + *istream, sizeof(z80inst));
-
+
}
if ((decoded->addr_mode & 0x1F) == Z80_IMMED && decoded->op != Z80_RST && decoded->op != Z80_IM) {
decoded->immed = *(++istream);
diff --git a/zcompare.py b/zcompare.py
new file mode 100755
index 0000000..d6eacea
--- /dev/null
+++ b/zcompare.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+from glob import glob
+import subprocess
+from sys import exit,argv
+
+prefixes = []
+skip = set()
+for i in range(1, len(argv)):
+ if '.' in argv[i]:
+ f = open(argv[i])
+ for line in f:
+ parts = line.split()
+ for part in parts:
+ if part.endswith('.bin'):
+ skip.add(part)
+ f.close()
+ print 'Skipping',len(skip),'entries from previous report.'
+ else:
+ prefixes.append(argv[i])
+
+for path in glob('ztests/*/*.bin'):
+ if path in skip:
+ continue
+ if prefixes:
+ good = False
+ fname = path.split('/')[-1]
+ for prefix in prefixes:
+ if fname.startswith(prefix):
+ good = True
+ break
+ if not good:
+ continue
+ try:
+ b = subprocess.check_output(['./ztestrun', path])
+ try:
+ m = subprocess.check_output(['gxz80/gxzrun', path])
+ #_,_,b = b.partition('\n')
+ if b != m:
+ print '-----------------------------'
+ print 'Mismatch in ' + path
+ print 'blastem output:'
+ print b
+ print 'gxz80 output:'
+ print m
+ print '-----------------------------'
+ else:
+ print path, 'passed'
+ except subprocess.CalledProcessError as e:
+ print '-----------------------------'
+ print 'gxz80 exited with code', e.returncode, 'for test', path
+ print 'blastem output:'
+ print b
+ print '-----------------------------'
+ except subprocess.CalledProcessError as e:
+ print '-----------------------------'
+ print 'blastem exited with code', e.returncode, 'for test', path
+ print '-----------------------------'
+
diff --git a/ztestgen.c b/ztestgen.c
index e74c38a..bbdc22b 100644
--- a/ztestgen.c
+++ b/ztestgen.c
@@ -24,6 +24,7 @@ extern char * z80_regs[Z80_USE_IMMED];
#define PRE_IX 0xDD
#define PRE_IY 0xFD
#define LD_IR16 0x01
+#define INC_R8 0x04
#define LD_IR8 0x06
#define LD_RR8 0x40
#define AND_R 0xA0
@@ -143,6 +144,43 @@ uint8_t * and_r(uint8_t * dst, uint8_t reg)
}
}
+uint8_t * inc_r(uint8_t *dst, uint8_t reg)
+{
+ if (reg == Z80_IXH || reg == Z80_IXL) {
+ *(dst++) = PRE_IX;
+ return inc_r(dst, reg - (Z80_IXL - Z80_L));
+ } else if(reg == Z80_IYH || reg == Z80_IYL) {
+ *(dst++) = PRE_IY;
+ return inc_r(dst, reg - (Z80_IYL - Z80_L));
+ } else {
+ *(dst++) = INC_R8 | reg << 3;
+ return dst;
+ }
+}
+
+void mark_used8(uint8_t *reg_usage, uint16_t *reg_values, uint8_t reg, uint8_t init_value)
+{
+ reg_usage[reg] = 1;
+ reg_values[reg] = init_value;
+ uint8_t word_reg = z80_word_reg(reg);
+ if (word_reg != Z80_UNUSED) {
+ reg_usage[word_reg] = 1;
+ reg_values[word_reg] = (reg_values[z80_high_reg(word_reg)] << 8) | (reg_values[z80_low_reg(word_reg)] & 0xFF);
+ }
+}
+
+uint8_t alloc_reg8(uint8_t *reg_usage, uint16_t *reg_values, uint8_t init_value)
+{
+ for (uint8_t reg = 0; reg < Z80_BC; reg++)
+ {
+ if (!reg_usage[reg]) {
+ mark_used8(reg_usage, reg_values, reg, init_value);
+ return reg;
+ }
+ }
+ return Z80_UNUSED;
+}
+
void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
{
z80inst copy;
@@ -184,12 +222,7 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
reg_values[z80_low_reg(inst->ea_reg)] = reg_values[inst->ea_reg] & 0xFF;
reg_usage[z80_low_reg(inst->ea_reg)] = 1;
} else {
- reg_values[inst->ea_reg] = rand() % 256;
- uint8_t word_reg = z80_word_reg(inst->ea_reg);
- if (word_reg != Z80_UNUSED) {
- reg_usage[word_reg] = 1;
- reg_values[word_reg] = (reg_values[z80_high_reg(word_reg)] << 8) | (reg_values[z80_low_reg(word_reg)] & 0xFF);
- }
+ mark_used8(reg_usage, reg_values, inst->ea_reg, rand() % 256);
}
break;
case Z80_REG_INDIRECT:
@@ -255,6 +288,10 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
}
reg_usage[inst->reg] = 1;
}
+ uint8_t counter_reg = Z80_UNUSED;
+ if (inst->op >= Z80_JP && inst->op <= Z80_JRCC) {
+ counter_reg = alloc_reg8(reg_usage, reg_values, 0);
+ }
puts("--------------");
for (uint8_t reg = 0; reg < Z80_UNUSED; reg++) {
if (reg_values[reg]) {
@@ -293,11 +330,26 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
//setup other regs
for (uint8_t reg = Z80_BC; reg <= Z80_IY; reg++) {
- if (reg != Z80_AF && reg != Z80_SP) {
- cur = ld_ir16(cur, reg, reg_values[reg]);
+ if (reg != Z80_AF && reg != Z80_SP && (inst->op != Z80_JP || addr_mode != Z80_REG_INDIRECT || inst->ea_reg != reg)) {
+ if (i == 1 && (z80_high_reg(reg) == counter_reg || z80_low_reg(reg) == counter_reg)) {
+ if (z80_high_reg(reg) == counter_reg) {
+ if (reg_usage[z80_low_reg(reg)]) {
+ cur = ld_ir8(cur, z80_low_reg(reg), reg_values[z80_low_reg(reg)]);
+ }
+ } else if (reg_usage[z80_high_reg(reg)]) {
+ cur = ld_ir8(cur, z80_high_reg(reg), reg_values[z80_high_reg(reg)]);
+ }
+ } else {
+ cur = ld_ir16(cur, reg, reg_values[reg]);
+ }
}
}
+ if (inst->op == Z80_JP && addr_mode == Z80_REG_INDIRECT) {
+ uint16_t address = cur - prog + (inst->ea_reg == Z80_HL ? 3 : 4) + instlen + 1 + i;
+ cur = ld_ir16(cur, inst->ea_reg, address);
+ }
+
//copy instruction
if (instlen == 3) {
memcpy(cur, instbuf, 2);
@@ -310,6 +362,12 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
//immed/displacement byte(s)
if (addr_mode == Z80_IX_DISPLACE || addr_mode == Z80_IY_DISPLACE) {
*(cur++) = inst->ea_reg;
+ } else if ((inst->op == Z80_JP || inst->op == Z80_JPCC) && addr_mode == Z80_IMMED) {
+ uint16_t address = cur - prog + 3 + i; //2 for immed address, 1/2 for instruction(s) to skip
+ *(cur++) = address;
+ *(cur++) = address >> 8;
+ } else if(inst->op == Z80_JR || inst->op == Z80_JRCC) {
+ *(cur++) = 1 + i; //skip one or 2 instructions based on value of i
} else if (addr_mode == Z80_IMMED & inst->op != Z80_IM) {
*(cur++) = inst->immed & 0xFF;
if (word_sized) {
@@ -325,6 +383,13 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
if (instlen == 3) {
*(cur++) = instbuf[2];
}
+ if (inst->op >= Z80_JP && inst->op <= Z80_JRCC) {
+ cur = inc_r(cur, counter_reg);
+ if (i) {
+ //inc twice on second iteration so we can differentiate the two
+ cur = inc_r(cur, counter_reg);
+ }
+ }
if (!i) {
//Save AF from first run
cur = push(cur, Z80_AF);
@@ -399,7 +464,7 @@ void z80_gen_test(z80inst * inst, uint8_t *instbuf, uint8_t instlen)
uint8_t should_skip(z80inst * inst)
{
- return inst->op >= Z80_JP || (inst->op >= Z80_LDI && inst->op <= Z80_CPDR) || inst->op == Z80_HALT
+ return inst->op >= Z80_DJNZ || (inst->op >= Z80_LDI && inst->op <= Z80_CPDR) || inst->op == Z80_HALT
|| inst->op == Z80_DAA || inst->op == Z80_RLD || inst->op == Z80_RRD || inst->op == Z80_NOP
|| inst->op == Z80_DI || inst->op == Z80_EI;
}
diff --git a/ztestrun.c b/ztestrun.c
index 9f500f1..0e262d3 100644
--- a/ztestrun.c
+++ b/ztestrun.c
@@ -1,6 +1,6 @@
/*
Copyright 2013 Michael Pavone
- This file is part of BlastEm.
+ This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "z80inst.h"
@@ -9,92 +9,101 @@
#include "vdp.h"
#include <stdio.h>
#include <stdlib.h>
+#include <stddef.h>
uint8_t z80_ram[0x2000];
-uint16_t cart[0x200000];
-#define MCLKS_PER_Z80 15
-//TODO: Figure out the exact value for this
-#define MCLKS_PER_FRAME (MCLKS_LINE*262)
-#define VINT_CYCLE ((MCLKS_LINE * 226)/MCLKS_PER_Z80)
-#define CYCLE_NEVER 0xFFFFFFFF
-
-uint8_t z80_read_ym(uint16_t location, z80_context * context)
+uint8_t z80_unmapped_read(uint32_t location, void * context)
{
return 0xFF;
}
-z80_context * z80_write_ym(uint16_t location, z80_context * context, uint8_t value)
+void * z80_unmapped_write(uint32_t location, void * context, uint8_t value)
{
return context;
}
-z80_context * z80_vdp_port_write(uint16_t location, z80_context * context, uint8_t value)
+const memmap_chunk z80_map[] = {
+ { 0x0000, 0x4000, 0x1FFF, 0, MMAP_READ | MMAP_WRITE | MMAP_CODE, z80_ram, NULL, NULL, NULL, NULL },
+ { 0x4000, 0x10000, 0xFFFF, 0, 0, NULL, NULL, NULL, z80_unmapped_read, z80_unmapped_write}
+};
+
+void z80_next_int_pulse(z80_context * context)
{
- return context;
+ context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER;
}
int main(int argc, char ** argv)
{
long filesize;
uint8_t *filebuf;
- x86_z80_options opts;
+ z80_options opts;
z80_context context;
- if (argc < 2) {
- fputs("usage: transz80 zrom [cartrom]\n", stderr);
+ char *fname = NULL;
+ uint8_t retranslate = 0;
+ for (int i = 1; i < argc; i++)
+ {
+ if (argv[i][0] == '-') {
+ switch(argv[i][1])
+ {
+ case 'r':
+ retranslate = 1;
+ break;
+ default:
+ fprintf(stderr, "Unrecognized switch -%c\n", argv[i][1]);
+ exit(1);
+ }
+ } else if (!fname) {
+ fname = argv[i];
+ }
+ }
+ if (!fname) {
+ fputs("usage: ztestrun zrom [cartrom]\n", stderr);
exit(1);
}
- FILE * f = fopen(argv[1], "rb");
+ FILE * f = fopen(fname, "rb");
if (!f) {
- fprintf(stderr, "unable to open file %s\n", argv[2]);
+ fprintf(stderr, "unable to open file %s\n", fname);
exit(1);
}
fseek(f, 0, SEEK_END);
filesize = ftell(f);
fseek(f, 0, SEEK_SET);
- fread(z80_ram, 1, filesize < sizeof(z80_ram) ? filesize : sizeof(z80_ram), f);
- fclose(f);
- if (argc > 2) {
- f = fopen(argv[2], "rb");
- if (!f) {
- fprintf(stderr, "unable to open file %s\n", argv[2]);
- exit(1);
- }
- fseek(f, 0, SEEK_END);
- filesize = ftell(f);
- fseek(f, 0, SEEK_SET);
- fread(cart, 1, filesize < sizeof(cart) ? filesize : sizeof(cart), f);
- fclose(f);
- for(unsigned short * cur = cart; cur - cart < (filesize/2); ++cur)
- {
- *cur = (*cur >> 8) | (*cur << 8);
- }
+ filesize = filesize < sizeof(z80_ram) ? filesize : sizeof(z80_ram);
+ if (fread(z80_ram, 1, filesize, f) != filesize) {
+ fprintf(stderr, "error reading %s\n",fname);
+ exit(1);
}
- init_x86_z80_opts(&opts);
+ fclose(f);
+ init_z80_opts(&opts, z80_map, 2, 1);
init_z80_context(&context, &opts);
//Z80 RAM
context.mem_pointers[0] = z80_ram;
- context.sync_cycle = context.target_cycle = 1000;
- context.int_cycle = CYCLE_NEVER;
- //cartridge/bank
- context.mem_pointers[1] = context.mem_pointers[2] = (uint8_t *)cart;
- z80_reset(&context);
- while (context.current_cycle < 1000) {
- z80_run(&context);
+ if (retranslate) {
+ //run core long enough to translate code
+ z80_run(&context, 1);
+ for (int i = 0; i < filesize; i++)
+ {
+ z80_handle_code_write(i, &context);
+ }
+ z80_assert_reset(&context, context.current_cycle);
+ z80_clear_reset(&context, context.current_cycle + 3);
+ z80_adjust_cycles(&context, context.current_cycle);
}
- printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
+ z80_run(&context, 1000);
+ printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n",
context.regs[Z80_A], context.regs[Z80_B], context.regs[Z80_C],
- context.regs[Z80_D], context.regs[Z80_E],
- (context.regs[Z80_H] << 8) | context.regs[Z80_L],
- (context.regs[Z80_IXH] << 8) | context.regs[Z80_IXL],
- (context.regs[Z80_IYH] << 8) | context.regs[Z80_IYL],
+ context.regs[Z80_D], context.regs[Z80_E],
+ (context.regs[Z80_H] << 8) | context.regs[Z80_L],
+ (context.regs[Z80_IXH] << 8) | context.regs[Z80_IXL],
+ (context.regs[Z80_IYH] << 8) | context.regs[Z80_IYL],
context.sp, context.im, context.iff1, context.iff2);
printf("Flags: SZVNC\n"
" %d%d%d%d%d\n", context.flags[ZF_S], context.flags[ZF_Z], context.flags[ZF_PV], context.flags[ZF_N], context.flags[ZF_C]);
puts("--Alternate Regs--");
- printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\n",
+ printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\n",
context.alt_regs[Z80_A], context.alt_regs[Z80_B], context.alt_regs[Z80_C],
- context.alt_regs[Z80_D], context.alt_regs[Z80_E],
+ context.alt_regs[Z80_D], context.alt_regs[Z80_E],
(context.alt_regs[Z80_H] << 8) | context.alt_regs[Z80_L]);
return 0;
}