From a39a82cc9773c8336eaa48379f2d2fe3615f178c Mon Sep 17 00:00:00 2001 From: Mike Pavone Date: Tue, 17 Jun 2014 01:50:29 -0400 Subject: blastem builds and almost works on OS X now --- z80_to_x86.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 2cab1c9..34263ad 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -28,21 +28,21 @@ #define dprintf #endif -void z80_read_byte(); -void z80_read_word(); -void z80_write_byte(); -void z80_write_word_highfirst(); -void z80_write_word_lowfirst(); -void z80_save_context(); -void z80_native_addr(); -void z80_do_sync(); -void z80_handle_cycle_limit_int(); -void z80_retrans_stub(); -void z80_io_read(); -void z80_io_write(); -void z80_halt(); -void z80_save_context(); -void z80_load_context(); +extern void z80_read_byte() asm("z80_read_byte"); +extern void z80_read_word() asm("z80_read_word"); +extern void z80_write_byte() asm("z80_write_byte"); +extern void z80_write_word_highfirst() asm("z80_write_word_highfirst"); +extern void z80_write_word_lowfirst() asm("z80_write_word_lowfirst"); +extern void z80_save_context() asm("z80_save_context"); +extern void z80_native_addr() asm("z80_native_addr"); +extern void z80_do_sync() asm("z80_do_sync"); +extern void z80_handle_cycle_limit_int() asm("z80_handle_cycle_limit_int"); +extern void z80_retrans_stub() asm("z80_retrans_stub"); +extern void z80_io_read() asm("z80_io_read"); +extern void z80_io_write() asm("z80_io_write"); +extern void z80_halt() asm("z80_halt"); +extern void z80_save_context() asm("z80_save_context"); +extern void z80_load_context() asm("z80_load_context"); uint8_t z80_size(z80inst * inst) { @@ -1780,6 +1780,7 @@ void z80_handle_deferred(z80_context * context) } } +extern void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) asm("z80_retranslate_inst"); void * z80_retranslate_inst(uint32_t address, z80_context * context, 
uint8_t * orig_start) { char disbuf[80]; -- cgit v1.2.3 From e8e8075b59f9e4a6a8e9b1cc96f2fcb447fa508e Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 19 Jun 2014 08:14:35 -0700 Subject: Slight cleanup of vint handling on the Z80 --- z80_to_x86.c | 1 + 1 file changed, 1 insertion(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 2cab1c9..ea07214 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1964,6 +1964,7 @@ void init_z80_context(z80_context * context, x86_z80_options * options) context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9)); memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9)); context->options = options; + context->int_cycle = 0xFFFFFFFF; } void z80_reset(z80_context * context) -- cgit v1.2.3 From a14794522213f99ddef00aa049785ff806d4b1dd Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 19 Jun 2014 19:50:16 -0700 Subject: Properly handle Z80 breakpoints on self-modifying code and setting Z80 breakpoints before the Z80 program has been loaded --- z80_to_x86.c | 115 +++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 46 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index ea07214..584d6e1 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -44,6 +44,8 @@ void z80_halt(); void z80_save_context(); void z80_load_context(); +uint8_t * zbreakpoint_patch(z80_context * context, uint16_t address, uint8_t * native); + uint8_t z80_size(z80inst * inst) { uint8_t reg = (inst->reg & 0x1F); @@ -334,6 +336,9 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context x86_z80_options *opts = context->options; uint8_t * start = dst; dst = z80_check_cycles_int(dst, address); + if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { + zbreakpoint_patch(context, address, start); + } switch(inst->op) { case Z80_LD: @@ -1975,62 +1980,80 @@ void 
z80_reset(z80_context * context) context->extra_pc = NULL; } -void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) +uint8_t * zbreakpoint_patch(z80_context * context, uint16_t address, uint8_t * native) { - static uint8_t * bp_stub = NULL; - uint8_t * native = z80_get_native_address_trans(context, address); - uint8_t * start_native = native; native = mov_ir(native, address, SCRATCH1, SZ_W); - if (!bp_stub) { - x86_z80_options * opts = context->options; - uint8_t * dst = opts->cur_code; - uint8_t * dst_end = opts->code_end; - if (dst_end - dst < 128) { - size_t size = 1024*1024; - dst = alloc_code(&size); - opts->code_end = dst_end = dst + size; - } - bp_stub = dst; - native = call(native, bp_stub); + native = call(native, context->bp_stub); + return native; +} - //Calculate length of prologue - dst = z80_check_cycles_int(dst, address); - int check_int_size = dst-bp_stub; - dst = bp_stub; +void zcreate_stub(z80_context * context) +{ + x86_z80_options * opts = context->options; + uint8_t * dst = opts->cur_code; + uint8_t * dst_end = opts->code_end; + if (dst_end - dst < 128) { + size_t size = 1024*1024; + dst = alloc_code(&size); + opts->code_end = dst_end = dst + size; + } + context->bp_stub = dst; - //Save context and call breakpoint handler - dst = call(dst, (uint8_t *)z80_save_context); - dst = push_r(dst, SCRATCH1); - dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); - dst = mov_rr(dst, SCRATCH1, RSI, SZ_W); - dst = call(dst, bp_handler); - dst = mov_rr(dst, RAX, CONTEXT, SZ_Q); - //Restore context - dst = call(dst, (uint8_t *)z80_load_context); - dst = pop_r(dst, SCRATCH1); - //do prologue stuff - dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); - uint8_t * jmp_off = dst+1; - dst = jcc(dst, CC_NC, dst + 7); - dst = pop_r(dst, SCRATCH1); - dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); - dst = push_r(dst, SCRATCH1); - dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); - *jmp_off = dst - (jmp_off+1); - //jump back 
to body of translated instruction - dst = pop_r(dst, SCRATCH1); - dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); - dst = jmp_r(dst, SCRATCH1); - opts->cur_code = dst; - } else { - native = call(native, bp_stub); + //Calculate length of prologue + int check_int_size = z80_check_cycles_int(dst, 0) - dst; + + //Calculate length of patch + int patch_size = zbreakpoint_patch(context, 0, dst) - dst; + + //Save context and call breakpoint handler + dst = call(dst, (uint8_t *)z80_save_context); + dst = push_r(dst, SCRATCH1); + dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); + dst = mov_rr(dst, SCRATCH1, RSI, SZ_W); + dst = call(dst, context->bp_handler); + dst = mov_rr(dst, RAX, CONTEXT, SZ_Q); + //Restore context + dst = call(dst, (uint8_t *)z80_load_context); + dst = pop_r(dst, SCRATCH1); + //do prologue stuff + dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); + uint8_t * jmp_off = dst+1; + dst = jcc(dst, CC_NC, dst + 7); + dst = pop_r(dst, SCRATCH1); + dst = add_ir(dst, check_int_size - patch_size, SCRATCH1, SZ_Q); + dst = push_r(dst, SCRATCH1); + dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); + *jmp_off = dst - (jmp_off+1); + //jump back to body of translated instruction + dst = pop_r(dst, SCRATCH1); + dst = add_ir(dst, check_int_size - patch_size, SCRATCH1, SZ_Q); + dst = jmp_r(dst, SCRATCH1); + opts->cur_code = dst; +} + +void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) +{ + context->bp_handler = bp_handler; + uint8_t bit = 1 << (address % sizeof(uint8_t)); + if (!(bit & context->breakpoint_flags[address / sizeof(uint8_t)])) { + context->breakpoint_flags[address / sizeof(uint8_t)] |= bit; + if (!context->bp_stub) { + zcreate_stub(context); + } + uint8_t * native = z80_get_native_address(context, address); + if (native) { + zbreakpoint_patch(context, address, native); + } } } void zremove_breakpoint(z80_context * context, uint16_t address) { + context->breakpoint_flags[address / sizeof(uint8_t)] &= 1 << 
(address % sizeof(uint8_t)); uint8_t * native = z80_get_native_address(context, address); - z80_check_cycles_int(native, address); + if (native) { + z80_check_cycles_int(native, address); + } } -- cgit v1.2.3 From 4c06b02583a69756a7cdf6561c0760087eb642ec Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 20 Jun 2014 07:57:32 -0700 Subject: Added some preliminary support for interpreting Z80 code from non-RAM addresses --- z80_to_x86.c | 132 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 92 insertions(+), 40 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 584d6e1..a714991 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -328,16 +328,18 @@ void z80_print_regs_exit(z80_context * context) exit(0); } -uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address) +uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address, uint8_t interp) { uint32_t cycles; x86_ea src_op, dst_op; uint8_t size; x86_z80_options *opts = context->options; uint8_t * start = dst; - dst = z80_check_cycles_int(dst, address); - if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { - zbreakpoint_patch(context, address, start); + if (!interp) { + dst = z80_check_cycles_int(dst, address); + if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { + zbreakpoint_patch(context, address, start); + } } switch(inst->op) { @@ -1660,18 +1662,73 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context return dst; } +uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) +{ + if (!context->interp_code[opcode]) { + if (opcode == 0xCB || (opcode >= 0xDD && opcode & 0xF == 0xD)) { + fprintf(stderr, "Encountered prefix byte %X at address %X. 
Z80 interpeter doesn't support those yet.", opcode, context->pc); + exit(1); + } + uint8_t codebuf[8]; + memset(codebuf, 0, sizeof(codebuf)); + codebuf[0] = opcode; + z80inst inst; + uint8_t * after = z80_decode(codebuf, &inst); + if (after - codebuf > 1) { + fprintf(stderr, "Encountered multi-byte Z80 instruction at %X. Z80 interpeter doesn't support those yet.", context->pc); + exit(1); + } + x86_z80_options * opts = context->options; + if (opts->code_end - opts->cur_code < ZMAX_NATIVE_SIZE) { + size_t size = 1024*1024; + opts->cur_code = alloc_code(&size); + opts->code_end = opts->cur_code + size; + } + context->interp_code[opcode] = opts->cur_code; + opts->cur_code = translate_z80inst(&inst, opts->cur_code, context, 0, 1); + opts->cur_code = mov_rdisp8r(opts->cur_code, CONTEXT, offsetof(z80_context, pc), SCRATCH1, SZ_W); + opts->cur_code = add_ir(opts->cur_code, after - codebuf, SCRATCH1, SZ_W); + opts->cur_code = call(opts->cur_code, (uint8_t *)z80_native_addr); + opts->cur_code = jmp_r(opts->cur_code, SCRATCH1); + } + return context->interp_code[opcode]; +} + +uint8_t * z80_make_interp_stub(z80_context * context, uint16_t address) +{ + x86_z80_options *opts = context->options; + uint8_t *dst = opts->cur_code; + //TODO: make this play well with the breakpoint code + dst = mov_ir(dst, address, SCRATCH1, SZ_W); + dst = call(dst, (uint8_t *)z80_read_byte); + //normal opcode fetch is already factored into instruction timing + //back out the base 3 cycles from a read here + //not quite perfect, but it will have to do for now + dst = sub_ir(dst, 3, ZCYCLES, SZ_D); + dst = z80_check_cycles_int(dst, address); + dst = call(dst, (uint8_t *)z80_save_context); + dst = mov_rr(dst, SCRATCH1, RDI, SZ_B); + dst = mov_irdisp8(dst, address, CONTEXT, offsetof(z80_context, pc), SZ_W); + dst = push_r(dst, CONTEXT); + dst = call(dst, (uint8_t *)z80_interp_handler); + dst = mov_rr(dst, RAX, SCRATCH1, SZ_Q); + dst = pop_r(dst, CONTEXT); + dst = call(dst, (uint8_t *)z80_load_context); 
+ dst = jmp_r(dst, SCRATCH1); + opts->code_end = dst; + return dst; +} + + uint8_t * z80_get_native_address(z80_context * context, uint32_t address) { native_map_slot *map; if (address < 0x4000) { address &= 0x1FFF; map = context->static_code_map; - } else if (address >= 0x8000) { - address &= 0x7FFF; - map = context->banked_code_map + context->bank_reg; } else { - //dprintf("z80_get_native_address: %X NULL\n", address); - return NULL; + address -= 0x4000; + map = context->banked_code_map; } if (!map->base || !map->offsets || map->offsets[address] == INVALID_OFFSET || map->offsets[address] == EXTENSION_WORD) { //dprintf("z80_get_native_address: %X NULL\n", address); @@ -1683,6 +1740,7 @@ uint8_t * z80_get_native_address(z80_context * context, uint32_t address) uint8_t z80_get_native_inst_size(x86_z80_options * opts, uint32_t address) { + //TODO: Fix for addresses >= 0x4000 if (address >= 0x4000) { return 0; } @@ -1700,15 +1758,14 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n opts->ram_inst_sizes[address] = native_size; context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); - } else if (address >= 0x8000) { - address &= 0x7FFF; - map = context->banked_code_map + context->bank_reg; + } else { + //HERE + address -= 0x4000; + map = context->banked_code_map; if (!map->offsets) { - map->offsets = malloc(sizeof(int32_t) * 0x8000); - memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); + map->offsets = malloc(sizeof(int32_t) * 0xC000); + memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); } - } else { - return; } if (!map->base) { map->base = native_address; @@ -1719,15 +1776,13 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n if (address < 0x4000) { address &= 0x1FFF; map = context->static_code_map; - } else if (address >= 0x8000) { - address &= 0x7FFF; - map = 
context->banked_code_map + context->bank_reg; } else { - return; + address -= 0x4000; + map = context->banked_code_map; } if (!map->offsets) { - map->offsets = malloc(sizeof(int32_t) * 0x8000); - memset(map->offsets, 0xFF, sizeof(int32_t) * 0x8000); + map->offsets = malloc(sizeof(int32_t) * 0xC000); + memset(map->offsets, 0xFF, sizeof(int32_t) * 0xC000); } map->offsets[address] = EXTENSION_WORD; } @@ -1737,6 +1792,7 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n uint32_t z80_get_instruction_start(native_map_slot * static_code_map, uint32_t address) { + //TODO: Fixme for address >= 0x4000 if (!static_code_map->base || address >= 0x4000) { return INVALID_INSTRUCTION_START; } @@ -1814,12 +1870,12 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o opts->cur_code = dst; } deferred_addr * orig_deferred = opts->deferred; - uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address); + uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address, 0); if ((native_end - dst) <= orig_size) { uint8_t * native_next = z80_get_native_address(context, address + after-inst); if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { remove_deferred_until(&opts->deferred, orig_deferred); - native_end = translate_z80inst(&instbuf, orig_start, context, address); + native_end = translate_z80inst(&instbuf, orig_start, context, address, 0); if (native_next == orig_start + orig_size && (native_next-native_end) < 2) { while (native_end < orig_start + orig_size) { *(native_end++) = 0x90; //NOP @@ -1840,7 +1896,7 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o z80_handle_deferred(context); return dst; } else { - dst = translate_z80inst(&instbuf, orig_start, context, address); + dst = translate_z80inst(&instbuf, orig_start, context, address, 0); if (!z80_is_terminal(&instbuf)) { dst = jmp(dst, 
z80_get_native_address_trans(context, address + after-inst)); } @@ -1860,12 +1916,9 @@ void translate_z80_stream(z80_context * context, uint32_t address) uint8_t * encoded = NULL, *next; if (address < 0x4000) { encoded = context->mem_pointers[0] + (address & 0x1FFF); - } else if(address >= 0x8000 && context->mem_pointers[1]) { - printf("attempt to translate Z80 code from banked area at address %X\n", address); - exit(1); - //encoded = context->mem_pointers[1] + (address & 0x7FFF); } - while (encoded != NULL) + + while (encoded != NULL || address >= 0x4000) { z80inst inst; dprintf("translating Z80 code at address %X\n", address); @@ -1880,9 +1933,10 @@ void translate_z80_stream(z80_context * context, uint32_t address) opts->code_end = opts->cur_code + size; jmp(opts->cur_code, opts->cur_code); } - if (address > 0x4000 && address < 0x8000) { - opts->cur_code = xor_rr(opts->cur_code, RDI, RDI, SZ_D); - opts->cur_code = call(opts->cur_code, (uint8_t *)exit); + if (address >= 0x4000) { + uint8_t *native_start = opts->cur_code; + uint8_t *after = z80_make_interp_stub(context, address); + z80_map_native_address(context, address, opts->cur_code, 1, after - native_start); break; } uint8_t * existing = z80_get_native_address(context, address); @@ -1899,7 +1953,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) printf("%X\t%s\n", address, disbuf); } #endif - uint8_t *after = translate_z80inst(&inst, opts->cur_code, context, address); + uint8_t *after = translate_z80inst(&inst, opts->cur_code, context, address, 0); z80_map_native_address(context, address, opts->cur_code, next-encoded, after - opts->cur_code); opts->cur_code = after; address += next-encoded; @@ -1916,14 +1970,12 @@ void translate_z80_stream(z80_context * context, uint32_t address) dprintf("defferred address: %X\n", address); if (address < 0x4000) { encoded = context->mem_pointers[0] + (address & 0x1FFF); - } else if (address > 0x8000 && context->mem_pointers[1]) { - encoded = 
context->mem_pointers[1] + (address & 0x7FFF); } else { - printf("attempt to translate non-memory address: %X\n", address); - exit(1); + encoded = NULL; } } else { encoded = NULL; + address = 0; } } } @@ -1966,8 +2018,8 @@ void init_z80_context(z80_context * context, x86_z80_options * options) context->static_code_map->base = NULL; context->static_code_map->offsets = malloc(sizeof(int32_t) * 0x2000); memset(context->static_code_map->offsets, 0xFF, sizeof(int32_t) * 0x2000); - context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9)); - memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9)); + context->banked_code_map = malloc(sizeof(native_map_slot)); + memset(context->banked_code_map, 0, sizeof(native_map_slot)); context->options = options; context->int_cycle = 0xFFFFFFFF; } -- cgit v1.2.3 From 24d56647b3b2baa4e7016ac5d66c0167d51688be Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 21 Jun 2014 09:36:15 -0700 Subject: Fix Z80 interrupts --- z80_to_x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index a714991..c46bed8 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2022,6 +2022,8 @@ void init_z80_context(z80_context * context, x86_z80_options * options) memset(context->banked_code_map, 0, sizeof(native_map_slot)); context->options = options; context->int_cycle = 0xFFFFFFFF; + context->int_pulse_start = 0xFFFFFFFF; + context->int_pulse_end = 0xFFFFFFFF; } void z80_reset(z80_context * context) -- cgit v1.2.3 From 5b1f68b82cda5b444fa2168d9b24b7101ba4b434 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Wed, 3 Dec 2014 09:30:01 -0800 Subject: Temporarily comment out code to translate Z80 instructions in place as in rare cases it can stomp the next instruction if a branch goes from a short form to a long one --- z80_to_x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index c46bed8..dc6cfaf 100644 ---
a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1871,6 +1871,7 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o } deferred_addr * orig_deferred = opts->deferred; uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address, 0); + /* if ((native_end - dst) <= orig_size) { uint8_t * native_next = z80_get_native_address(context, address + after-inst); if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { @@ -1887,6 +1888,7 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o return orig_start; } } + */ z80_map_native_address(context, address, dst, after-inst, ZMAX_NATIVE_SIZE); opts->cur_code = dst+ZMAX_NATIVE_SIZE; jmp(orig_start, dst); -- cgit v1.2.3 From d07b907bc7889308890b590d2aaf88dfc44ae616 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 14 Dec 2014 16:45:23 -0800 Subject: WIP effort to update z80 core for code gen changes --- z80_to_x86.c | 997 +++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 561 insertions(+), 436 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 2cab1c9..a331308 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -54,24 +54,9 @@ uint8_t z80_size(z80inst * inst) return SZ_B; } -uint8_t * zcycles(uint8_t * dst, uint32_t num_cycles) -{ - return add_ir(dst, num_cycles, ZCYCLES, SZ_D); -} - -uint8_t * z80_check_cycles_int(uint8_t * dst, uint16_t address) -{ - dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); - uint8_t * jmp_off = dst+1; - dst = jcc(dst, CC_NC, dst + 7); - dst = mov_ir(dst, address, SCRATCH1, SZ_W); - dst = call(dst, (uint8_t *)z80_handle_cycle_limit_int); - *jmp_off = dst - (jmp_off+1); - return dst; -} - -uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts) +void translate_z80_reg(z80inst * inst, x86_ea * ea, z80_options * opts) { + code_info *code = &opts->gen.code; if (inst->reg == Z80_USE_IMMED) { ea->mode 
= MODE_IMMED; ea->disp = inst->immed; @@ -81,12 +66,12 @@ uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_ ea->mode = MODE_REG_DIRECT; if (inst->reg == Z80_IYH) { if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { - dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); - ea->base = SCRATCH1; + mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + ea->base = opts->gen.scratch1; } else { ea->base = opts->regs[Z80_IYL]; - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } } else if(opts->regs[inst->reg] >= 0) { ea->base = opts->regs[inst->reg]; @@ -96,64 +81,64 @@ uint8_t * translate_z80_reg(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_ if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix ea->base = opts->regs[z80_low_reg(inst->reg)]; - dst = ror_ir(dst, 8, ea->base, SZ_W); + ror_ir(code, 8, ea->base, SZ_W); } } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { //temp regs require REX prefix too ea->base = opts->regs[z80_low_reg(inst->reg)]; - dst = ror_ir(dst, 8, ea->base, SZ_W); + ror_ir(code, 8, ea->base, SZ_W); } } } else { ea->mode = MODE_REG_DISPLACE8; - ea->base = CONTEXT; + ea->base = opts->gen.context_reg; ea->disp = offsetof(z80_context, regs) + inst->reg; } } - return dst; } -uint8_t * z80_save_reg(uint8_t * dst, z80inst * inst, x86_z80_options * opts) +void z80_save_reg(z80inst * inst, z80_options * opts) { + code_info *code = &opts->gen.code; if (inst->reg == Z80_IYH) { if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); - dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); + 
mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } else { - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } } else if (opts->regs[inst->reg] >= AH && opts->regs[inst->reg] <= BH) { if ((inst->addr_mode & 0x1F) == Z80_REG) { uint8_t other_reg = opts->regs[inst->ea_reg]; if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix - dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); + ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); } } else if((inst->addr_mode & 0x1F) != Z80_UNUSED && (inst->addr_mode & 0x1F) != Z80_IMMED) { //temp regs require REX prefix too - dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); + ror_ir(code, 8, opts->regs[z80_low_reg(inst->reg)], SZ_W); } } - return dst; } -uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_options * opts, uint8_t read, uint8_t modify) +void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) { + code_info *code = &opts->gen.code; uint8_t size, reg, areg; ea->mode = MODE_REG_DIRECT; - areg = read ? SCRATCH1 : SCRATCH2; + areg = read ? 
opts->gen.scratch1 : opts->gen.scratch2; switch(inst->addr_mode & 0x1F) { case Z80_REG: if (inst->ea_reg == Z80_IYH) { if (inst->reg == Z80_IYL) { - dst = mov_rr(dst, opts->regs[Z80_IY], SCRATCH1, SZ_W); - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); - ea->base = SCRATCH1; + mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + ea->base = opts->gen.scratch1; } else { ea->base = opts->regs[Z80_IYL]; - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } } else { ea->base = opts->regs[inst->ea_reg]; @@ -162,30 +147,30 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; - dst = ror_ir(dst, 8, ea->base, SZ_W); + ror_ir(code, 8, ea->base, SZ_W); } } } break; case Z80_REG_INDIRECT: - dst = mov_rr(dst, opts->regs[inst->ea_reg], areg, SZ_W); + mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); size = z80_size(inst); if (read) { if (modify) { - //dst = push_r(dst, SCRATCH1); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); + //push_r(code, opts->gen.scratch1); + mov_rrdisp8(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); } if (size == SZ_B) { - dst = call(dst, (uint8_t *)z80_read_byte); + call(code, opts->read_8); } else { - dst = call(dst, (uint8_t *)z80_read_word); + dst = call(dst, opts->read_16); } if (modify) { - //dst = pop_r(dst, SCRATCH2); - dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); + //pop_r(code, opts->gen.scratch2); + mov_rdisp8r(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); } } - ea->base = SCRATCH1; + ea->base = opts->gen.scratch1; break; case Z80_IMMED: ea->mode = MODE_IMMED; @@ -196,7 +181,7 @@ uint8_t 
* translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o size = z80_size(inst); if (read) { /*if (modify) { - dst = push_r(dst, SCRATCH1); + dst = push_r(dst, opts->gen.scratch1); }*/ if (size == SZ_B) { dst = call(dst, (uint8_t *)z80_read_byte); @@ -204,11 +189,11 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o dst = call(dst, (uint8_t *)z80_read_word); } if (modify) { - //dst = pop_r(dst, SCRATCH2); - dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_W); + //dst = pop_r(dst, opts->gen.scratch2); + dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_W); } } - ea->base = SCRATCH1; + ea->base = opts->gen.scratch1; break; case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: @@ -218,8 +203,8 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o size = z80_size(inst); if (read) { if (modify) { - //dst = push_r(dst, SCRATCH1); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, offsetof(z80_context, scratch1), SZ_W); + //dst = push_r(dst, opts->gen.scratch1); + dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); } if (size == SZ_B) { dst = call(dst, (uint8_t *)z80_read_byte); @@ -227,11 +212,11 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o dst = call(dst, (uint8_t *)z80_read_word); } if (modify) { - //dst = pop_r(dst, SCRATCH2); - dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, scratch1), SCRATCH2, SZ_W); + //dst = pop_r(dst, opts->gen.scratch2); + dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); } } - ea->base = SCRATCH1; + ea->base = opts->gen.scratch1; break; case Z80_UNUSED: ea->mode = MODE_UNUSED; @@ -243,13 +228,13 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o return dst; } -uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, x86_z80_options * opts) +uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, z80_options 
* opts) { if ((inst->addr_mode & 0x1F) == Z80_REG) { if (inst->ea_reg == Z80_IYH) { if (inst->reg == Z80_IYL) { dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); - dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_IYL], SZ_B); + dst = mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); } else { dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); @@ -326,14 +311,14 @@ void z80_print_regs_exit(z80_context * context) exit(0); } -uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context, uint16_t address) +uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) { uint32_t cycles; x86_ea src_op, dst_op; uint8_t size; - x86_z80_options *opts = context->options; - uint8_t * start = dst; - dst = z80_check_cycles_int(dst, address); + z80_options *opts = context->options; + uint8_t * start = opts->code.cur; + check_cycles_int(&opts->gen, address); switch(inst->op) { case Z80_LD: @@ -364,7 +349,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode & Z80_DIR) { dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); @@ -390,55 +375,55 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } break; case Z80_PUSH: - dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); + dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 
9 : 5); dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { - dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); - dst = shl_ir(dst, 8, SCRATCH1, SZ_W); - dst = mov_rdisp8r(dst, CONTEXT, zf_off(ZF_S), SCRATCH1, SZ_B); - dst = shl_ir(dst, 1, SCRATCH1, SZ_B); - dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_Z), SCRATCH1, SZ_B); - dst = shl_ir(dst, 2, SCRATCH1, SZ_B); - dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_H), SCRATCH1, SZ_B); - dst = shl_ir(dst, 2, SCRATCH1, SZ_B); - dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_PV), SCRATCH1, SZ_B); - dst = shl_ir(dst, 1, SCRATCH1, SZ_B); - dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_N), SCRATCH1, SZ_B); - dst = shl_ir(dst, 1, SCRATCH1, SZ_B); - dst = or_rdisp8r(dst, CONTEXT, zf_off(ZF_C), SCRATCH1, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 8, opts->gen.scratch1, SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); + dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); + dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); + dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); + dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B); + dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); + dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); } else { dst = translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, src_op.base, SCRATCH1, SZ_W); + dst = mov_rr(dst, src_op.base, opts->gen.scratch1, SZ_W); } - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t 
*)z80_write_word_highfirst); //no call to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair break; case Z80_POP: - dst = zcycles(dst, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_word); dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { - dst = bt_ir(dst, 0, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = bt_ir(dst, 1, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_N)); - dst = bt_ir(dst, 2, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_PV)); - dst = bt_ir(dst, 4, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_H)); - dst = bt_ir(dst, 6, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_Z)); - dst = bt_ir(dst, 7, SCRATCH1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_S)); - dst = shr_ir(dst, 8, SCRATCH1, SZ_W); - dst = mov_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); + dst = bt_ir(dst, 0, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = bt_ir(dst, 1, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_N)); + dst = bt_ir(dst, 2, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_PV)); + dst = bt_ir(dst, 4, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_H)); + dst = bt_ir(dst, 6, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); + dst = bt_ir(dst, 7, opts->gen.scratch1, SZ_W); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_S)); + dst = shr_ir(dst, 8, opts->gen.scratch1, SZ_W); + dst 
= mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); } else { dst = translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, SCRATCH1, src_op.base, SZ_W); + dst = mov_rr(dst, opts->gen.scratch1, src_op.base, SZ_W); } //no call to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair @@ -449,31 +434,31 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else { cycles = 8; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode == Z80_REG) { if(inst->reg == Z80_AF) { - dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH1, SZ_B); - dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_A), SZ_B); + dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); + dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); //Flags are currently word aligned, so we can move //them efficiently a word at a time for (int f = ZF_C; f < ZF_NUM; f+=2) { - dst = mov_rdisp8r(dst, CONTEXT, zf_off(f), SCRATCH1, SZ_W); - dst = mov_rdisp8r(dst, CONTEXT, zaf_off(f), SCRATCH2, SZ_W); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zaf_off(f), SZ_W); - dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zf_off(f), SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); } } else { dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); } } else { - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); dst = call(dst, 
(uint8_t *)z80_read_byte); - dst = xchg_rr(dst, opts->regs[inst->reg], SCRATCH1, SZ_B); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); + dst = xchg_rr(dst, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); uint8_t high_reg = z80_high_reg(inst->reg); uint8_t use_reg; //even though some of the upper halves can be used directly @@ -481,50 +466,50 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context //prevent us from taking advantage of it use_reg = opts->regs[inst->reg]; dst = ror_ir(dst, 8, use_reg, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); - dst = add_ir(dst, 1, SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + dst = add_ir(dst, 1, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); - dst = xchg_rr(dst, use_reg, SCRATCH1, SZ_B); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); - dst = add_ir(dst, 1, SCRATCH2, SZ_W); + dst = xchg_rr(dst, use_reg, opts->gen.scratch1, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + dst = add_ir(dst, 1, opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); //restore reg to normal rotation dst = ror_ir(dst, 8, use_reg, SZ_W); - dst = zcycles(dst, 2); + dst = cycles(&opts->gen, 2); } break; case Z80_EXX: - dst = zcycles(dst, 4); - dst = mov_rr(dst, opts->regs[Z80_BC], SCRATCH1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); - dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); - dst = mov_rdisp8r(dst, CONTEXT, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_C), SZ_W); - dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, zar_off(Z80_L), SZ_W); - dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH1, SZ_W); - dst = mov_rdisp8r(dst, 
CONTEXT, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); - dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zar_off(Z80_E), SZ_W); + dst = cycles(&opts->gen, 4); + dst = mov_rr(dst, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); + dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); + dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); break; case Z80_LDI: { - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); - dst = zcycles(dst, 2); + dst = cycles(&opts->gen, 2); dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); break; } case Z80_LDIR: { - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, 
SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); @@ -532,37 +517,37 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); uint8_t * cont = dst+1; dst = jcc(dst, CC_Z, dst+2); - dst = zcycles(dst, 7); + dst = cycles(&opts->gen, 7); //TODO: Figure out what the flag state should be here //TODO: Figure out whether an interrupt can interrupt this dst = jmp(dst, start); *cont = dst - (cont + 1); - dst = zcycles(dst, 2); + dst = cycles(&opts->gen, 2); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; } case Z80_LDD: { - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); - dst = zcycles(dst, 2); + dst = cycles(&opts->gen, 2); dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_NZ, CONTEXT, zf_off(ZF_PV)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); break; } 
case Z80_LDDR: { - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], SCRATCH2, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); @@ -570,15 +555,15 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); uint8_t * cont = dst+1; dst = jcc(dst, CC_Z, dst+2); - dst = zcycles(dst, 7); + dst = cycles(&opts->gen, 7); //TODO: Figure out what the flag state should be here //TODO: Figure out whether an interrupt can interrupt this dst = jmp(dst, start); *cont = dst - (cont + 1); - dst = zcycles(dst, 2); + dst = cycles(&opts->gen, 2); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; } /*case Z80_CPI: @@ -595,7 +580,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -603,13 +588,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else { dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, 
opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -623,21 +608,21 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); - dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); if (src_op.mode == MODE_REG_DIRECT) { dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst)); } else { dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, 
inst, opts); break; @@ -648,7 +633,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(inst->addr_mode == Z80_IMMED) { cycles += 3; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -656,12 +641,12 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else { dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); break; @@ -674,21 +659,21 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); - dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); if (src_op.mode == MODE_REG_DIRECT) { dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst)); } else { dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, 
CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); break; @@ -701,7 +686,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -710,13 +695,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = 
z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -730,7 +715,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -739,13 +724,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -759,7 +744,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(z80_size(inst) == SZ_W) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -768,13 +753,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, 
CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -786,7 +771,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(inst->addr_mode == Z80_IMMED) { cycles += 3; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { @@ -794,12 +779,12 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else { dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); dst = z80_save_reg(dst, 
inst, opts); dst = z80_save_ea(dst, inst, opts); break; @@ -812,18 +797,18 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); if (dst_op.mode == MODE_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); } dst = add_ir(dst, 1, dst_op.base, z80_size(inst)); if (z80_size(inst) == SZ_B) { - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -838,18 +823,18 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); dst = translate_z80_reg(inst, &dst_op, dst, opts); if (dst_op.mode == MODE_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); } dst = sub_ir(dst, 1, dst_op.base, z80_size(inst)); if (z80_size(inst) == SZ_B) { - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), 
SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); @@ -857,76 +842,76 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; //case Z80_DAA: case Z80_CPL: - dst = zcycles(dst, 4); + dst = cycles(&opts->gen, 4); dst = not_r(dst, opts->regs[Z80_A], SZ_B); //TODO: Implement half-carry flag - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); break; case Z80_NEG: - dst = zcycles(dst, 8); + dst = cycles(&opts->gen, 8); dst = neg_r(dst, opts->regs[Z80_A], SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = setcc_rdisp8(dst, CC_O, CONTEXT, zf_off(ZF_PV)); - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); break; case Z80_CCF: - dst = zcycles(dst, 4); - dst = xor_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = cycles(&opts->gen, 4); + dst = xor_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + dst = mov_irdisp8(dst, 0, 
opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag break; case Z80_SCF: - dst = zcycles(dst, 4); - dst = mov_irdisp8(dst, 1, CONTEXT, zf_off(ZF_C), SZ_B); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = cycles(&opts->gen, 4); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag break; case Z80_NOP: if (inst->immed == 42) { dst = call(dst, (uint8_t *)z80_save_context); - dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); + dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); dst = jmp(dst, (uint8_t *)z80_print_regs_exit); } else { - dst = zcycles(dst, 4 * inst->immed); + dst = cycles(&opts->gen, 4 * inst->immed); } break; case Z80_HALT: - dst = zcycles(dst, 4); - dst = mov_ir(dst, address, SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 4); + dst = mov_ir(dst, address, opts->gen.scratch1, SZ_W); uint8_t * call_inst = dst; dst = call(dst, (uint8_t *)z80_halt); dst = jmp(dst, call_inst); break; case Z80_DI: - dst = zcycles(dst, 4); - dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff1), SZ_B); - dst = mov_irdisp8(dst, 0, CONTEXT, offsetof(z80_context, iff2), SZ_B); - dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, sync_cycle), ZLIMIT, SZ_D); - dst = mov_irdisp8(dst, 0xFFFFFFFF, CONTEXT, offsetof(z80_context, int_cycle), SZ_D); + dst = cycles(&opts->gen, 4); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); + dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D); + dst = mov_irdisp8(dst, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D); break; case Z80_EI: - dst = zcycles(dst, 4); - dst = mov_rrdisp32(dst, ZCYCLES, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); - dst = mov_irdisp8(dst, 1, 
CONTEXT, offsetof(z80_context, iff1), SZ_B); - dst = mov_irdisp8(dst, 1, CONTEXT, offsetof(z80_context, iff2), SZ_B); + dst = cycles(&opts->gen, 4); + dst = mov_rrdisp32(dst, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles - dst = add_irdisp32(dst, 4, CONTEXT, offsetof(z80_context, int_enable_cycle), SZ_D); + dst = add_irdisp32(dst, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); dst = call(dst, (uint8_t *)z80_do_sync); break; case Z80_IM: - dst = zcycles(dst, 4); - dst = mov_irdisp8(dst, inst->immed, CONTEXT, offsetof(z80_context, im), SZ_B); + dst = cycles(&opts->gen, 4); + dst = mov_irdisp8(dst, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B); break; case Z80_RLC: cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); @@ -935,13 +920,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -953,27 +938,27 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; case Z80_RL: cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); } - dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); dst = rcl_ir(dst, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -985,11 +970,11 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; case Z80_RRC: cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); @@ -998,13 +983,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1016,27 +1001,27 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; case Z80_RR: cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); } - dst = bt_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); dst = rcr_ir(dst, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1049,29 +1034,29 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context case Z80_SLA: case Z80_SLL: cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); } dst = shl_ir(dst, 1, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); if (inst->op == Z80_SLL) { dst = or_ir(dst, 1, dst_op.base, SZ_B); } if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1083,11 +1068,11 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; case Z80_SRA: cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); @@ -1096,13 +1081,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1114,11 +1099,11 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; case Z80_SRL: cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_UNUSED) { dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; dst = translate_z80_reg(inst, &dst_op, dst, opts); @@ -1127,13 +1112,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context if (src_op.mode != MODE_UNUSED) { dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1144,65 +1129,65 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } break; case Z80_RLD: - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x24, A = 0x31 - dst = mov_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B); - dst = shl_ir(dst, 4, SCRATCH1, SZ_W); - dst = and_ir(dst, 0xF, SCRATCH2, SZ_W); - dst = and_ir(dst, 
0xFFF, SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); + dst = shl_ir(dst, 4, opts->gen.scratch1, SZ_W); + dst = and_ir(dst, 0xF, opts->gen.scratch2, SZ_W); + dst = and_ir(dst, 0xFFF, opts->gen.scratch1, SZ_W); dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); - dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); - //SCRATCH1 = 0x0124 - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); - dst = zcycles(dst, 4); - dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); + dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); + //opts->gen.scratch1 = 0x0124 + dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); + dst = cycles(&opts->gen, 4); + dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); //set flags //TODO: Implement half-carry flag - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); break; case Z80_RRD: - dst = zcycles(dst, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_byte); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x41, A = 0x32 - dst = movzx_rr(dst, opts->regs[Z80_A], SCRATCH2, SZ_B, SZ_W); - dst = ror_ir(dst, 4, SCRATCH1, SZ_W); - dst = shl_ir(dst, 4, SCRATCH2, SZ_W); - dst 
= and_ir(dst, 0xF00F, SCRATCH1, SZ_W); + dst = movzx_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); + dst = ror_ir(dst, 4, opts->gen.scratch1, SZ_W); + dst = shl_ir(dst, 4, opts->gen.scratch2, SZ_W); + dst = and_ir(dst, 0xF00F, opts->gen.scratch1, SZ_W); dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); - //SCRATCH1 = 0x2001 - //SCRATCH2 = 0x0040 - dst = or_rr(dst, SCRATCH2, SCRATCH1, SZ_W); - //SCRATCH1 = 0x2041 - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); - dst = zcycles(dst, 4); - dst = shr_ir(dst, 4, SCRATCH1, SZ_B); - dst = or_rr(dst, SCRATCH1, opts->regs[Z80_A], SZ_B); + //opts->gen.scratch1 = 0x2001 + //opts->gen.scratch2 = 0x0040 + dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); + //opts->gen.scratch1 = 0x2041 + dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); + dst = cycles(&opts->gen, 4); + dst = shr_ir(dst, 4, opts->gen.scratch1, SZ_B); + dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); //set flags //TODO: Implement half-carry flag - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = mov_rr(dst, opts->regs[Z80_HL], SCRATCH2, SZ_W); - dst = ror_ir(dst, 8, SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_write_byte); break; case Z80_BIT: { cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1215,23 +1200,23 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } dst = bt_ir(dst, bit, src_op.base, size); - dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_NC, CONTEXT, zf_off(ZF_PV)); - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); + dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); if (inst->immed == 7) { dst = cmp_ir(dst, 0, src_op.base, size); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } else { - dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); + dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); } break; } case Z80_SET: { cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1247,7 +1232,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } dst = bts_ir(dst, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { @@ -1273,7 +1258,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } case Z80_RES: { cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1289,7 +1274,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 - dst = zcycles(dst, 1); + dst = cycles(&opts->gen, 1); } dst = btr_ir(dst, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { @@ -1320,7 +1305,7 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { cycles += 4; } - dst = zcycles(dst, cycles); + dst = cycles(&opts->gen, cycles); if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { @@ -1331,44 +1316,44 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = jmp(dst, call_dst); } else { if (inst->addr_mode == 
Z80_REG_INDIRECT) { - dst = mov_rr(dst, opts->regs[inst->ea_reg], SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); } else { - dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); + dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); } dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } break; } case Z80_JPCC: { - dst = zcycles(dst, 7);//T States: 4,3 + dst = cycles(&opts->gen, 7);//T States: 4,3 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } uint8_t *no_jump_off = dst+1; dst = jcc(dst, cond, dst+2); - dst = zcycles(dst, 5);//T States: 5 + dst = cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = inst->immed; if (dest_addr < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, dest_addr); @@ -1379,15 +1364,15 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); + dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } *no_jump_off = dst - (no_jump_off+1); break; } case Z80_JR: { - dst = zcycles(dst, 
12);//T States: 4,3,5 + dst = cycles(&opts->gen, 12);//T States: 4,3,5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, dest_addr); @@ -1398,31 +1383,31 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); + dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } break; } case Z80_JRCC: { - dst = zcycles(dst, 7);//T States: 4,3 + dst = cycles(&opts->gen, 7);//T States: 4,3 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; } uint8_t *no_jump_off = dst+1; dst = jcc(dst, cond, dst+2); - dst = zcycles(dst, 5);//T States: 5 + dst = cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, dest_addr); @@ -1433,19 +1418,19 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); + dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } *no_jump_off = dst - (no_jump_off+1); break; } case Z80_DJNZ: - dst = zcycles(dst, 8);//T States: 5,3 + dst = cycles(&opts->gen, 8);//T States: 5,3 dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B); uint8_t *no_jump_off = dst+1; dst = jcc(dst, CC_Z, dst+2); - dst 
= zcycles(dst, 5);//T States: 5 + dst = cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, dest_addr); @@ -1456,17 +1441,17 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, dest_addr, SCRATCH1, SZ_W); + dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } *no_jump_off = dst - (no_jump_off+1); break; case Z80_CALL: { - dst = zcycles(dst, 11);//T States: 4,3,4 + dst = cycles(&opts->gen, 11);//T States: 4,3,4 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); + dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, inst->immed); @@ -1477,44 +1462,44 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); + dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } break; } case Z80_CALLCC: - dst = zcycles(dst, 10);//T States: 4,3,3 (false case) + dst = cycles(&opts->gen, 10);//T States: 4,3,3 (false case) uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, CONTEXT, 
zf_off(ZF_C), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } uint8_t *no_call_off = dst+1; dst = jcc(dst, cond, dst+2); - dst = zcycles(dst, 1);//Last of the above T states takes an extra cycle in the true case + dst = cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 3, SCRATCH1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); + dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { uint8_t * call_dst = z80_get_native_address(context, inst->immed); @@ -1525,81 +1510,81 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } dst = jmp(dst, call_dst); } else { - dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_W); + dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); } *no_call_off = dst - (no_call_off+1); break; case Z80_RET: - dst = zcycles(dst, 4);//T States: 4 - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 4);//T States: 4 + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, 
opts->gen.scratch1); break; case Z80_RETCC: { - dst = zcycles(dst, 5);//T States: 5 + dst = cycles(&opts->gen, 5);//T States: 5 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_Z), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_C), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_PV), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, CONTEXT, zf_off(ZF_S), SZ_B); + dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } uint8_t *no_call_off = dst+1; dst = jcc(dst, cond, dst+2); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); *no_call_off = dst - (no_call_off+1); break; } case Z80_RETI: //For some systems, this may need a callback for signalling interrupt routine completion - dst = zcycles(dst, 8);//T States: 4, 4 - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); + dst = cycles(&opts->gen, 8);//T States: 4, 4 + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); break; case Z80_RETN: - dst = zcycles(dst, 8);//T States: 4, 4 - dst = mov_rdisp8r(dst, CONTEXT, 
offsetof(z80_context, iff2), SCRATCH2, SZ_B); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH1, SZ_W); - dst = mov_rrdisp8(dst, SCRATCH2, CONTEXT, offsetof(z80_context, iff1), SZ_B); + dst = cycles(&opts->gen, 8);//T States: 4, 4 + dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, SCRATCH1); + dst = jmp_r(dst, opts->gen.scratch1); break; case Z80_RST: { //RST is basically CALL to an address in page 0 - dst = zcycles(dst, 5);//T States: 5 + dst = cycles(&opts->gen, 5);//T States: 5 dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 1, SCRATCH1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], SCRATCH2, SZ_W); + dst = mov_ir(dst, address + 1, opts->gen.scratch1, SZ_W); + dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 uint8_t * call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { @@ -1611,15 +1596,15 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context break; } case Z80_IN: - dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 + dst = cycles(&opts->gen, inst->reg == Z80_A ? 
7 : 8);//T States: 4 3/4 if (inst->addr_mode == Z80_IMMED_INDIRECT) { - dst = mov_ir(dst, inst->immed, SCRATCH1, SZ_B); + dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_B); } else { - dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH1, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); } dst = call(dst, (uint8_t *)z80_io_read); translate_z80_reg(inst, &dst_op, dst, opts); - dst = mov_rr(dst, SCRATCH1, dst_op.base, SZ_B); + dst = mov_rr(dst, opts->gen.scratch1, dst_op.base, SZ_B); dst = z80_save_reg(dst, inst, opts); break; /*case Z80_INI: @@ -1627,14 +1612,14 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context case Z80_IND: case Z80_INDR:*/ case Z80_OUT: - dst = zcycles(dst, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 + dst = cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { - dst = mov_ir(dst, inst->immed, SCRATCH2, SZ_B); + dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_B); } else { - dst = mov_rr(dst, opts->regs[Z80_C], SCRATCH2, SZ_B); + dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); } translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, dst_op.base, SCRATCH1, SZ_B); + dst = mov_rr(dst, dst_op.base, opts->gen.scratch1, SZ_B); dst = call(dst, (uint8_t *)z80_io_write); dst = z80_save_reg(dst, inst, opts); break; @@ -1676,7 +1661,7 @@ uint8_t * z80_get_native_address(z80_context * context, uint32_t address) return map->base + map->offsets[address]; } -uint8_t z80_get_native_inst_size(x86_z80_options * opts, uint32_t address) +uint8_t z80_get_native_inst_size(z80_options * opts, uint32_t address) { if (address >= 0x4000) { return 0; @@ -1688,7 +1673,7 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n { uint32_t orig_address = address; native_map_slot *map; - x86_z80_options * opts = context->options; + z80_options * opts = context->options; if (address < 0x4000) { address 
&= 0x1FFF; map = context->static_code_map; @@ -1752,7 +1737,7 @@ z80_context * z80_handle_code_write(uint32_t address, z80_context * context) if (inst_start != INVALID_INSTRUCTION_START) { uint8_t * dst = z80_get_native_address(context, inst_start); dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address); - dst = mov_ir(dst, inst_start, SCRATCH1, SZ_D); + dst = mov_ir(dst, inst_start, opts->gen.scratch1, SZ_D); dst = call(dst, (uint8_t *)z80_retrans_stub); } return context; @@ -1773,7 +1758,7 @@ uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address) void z80_handle_deferred(z80_context * context) { - x86_z80_options * opts = context->options; + z80_options * opts = context->options; process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); if (opts->deferred) { translate_z80_stream(context, opts->deferred->address); @@ -1783,7 +1768,7 @@ void z80_handle_deferred(z80_context * context) void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * orig_start) { char disbuf[80]; - x86_z80_options * opts = context->options; + z80_options * opts = context->options; uint8_t orig_size = z80_get_native_inst_size(opts, address); uint32_t orig = address; address &= 0x1FFF; @@ -1850,7 +1835,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) if (z80_get_native_address(context, address)) { return; } - x86_z80_options * opts = context->options; + z80_options * opts = context->options; uint32_t start_address = address; uint8_t * encoded = NULL, *next; if (address < 0x4000) { @@ -1923,8 +1908,17 @@ void translate_z80_stream(z80_context * context, uint32_t address) } } -void init_x86_z80_opts(x86_z80_options * options) +void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t num_chunks) { + memset(options, 0, sizeof(*options)); + + options->gen.address_size = SZ_W; + options->gen.address_mask = 0xFFFF; + 
options->gen.max_address = 0x10000; + options->gen.bus_cycles = 3; + options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers); + options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags); + options->flags = 0; options->regs[Z80_B] = BH; options->regs[Z80_C] = RBX; @@ -1946,15 +1940,146 @@ void init_x86_z80_opts(x86_z80_options * options) options->regs[Z80_AF] = -1; options->regs[Z80_IX] = RDX; options->regs[Z80_IY] = R8; - size_t size = 1024 * 1024; - options->cur_code = alloc_code(&size); - options->code_end = options->cur_code + size; - options->ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); + + options->bank_reg = R15; + options->bank_pointer = R12; + + options->gen.context_reg = RSI; + options->gen.cycles = RBP; + options->gen.limit = RDI; + options->gen.scratch1 = R13; + options->gen.scratch2 = R14; + + options->gen.native_code_map = malloc(sizeof(native_map_slot)); + memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); + options->gen.deferred = NULL; + options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000); - options->deferred = NULL; + + code_info *code = &options->gen.code; + init_code_info(code); + + options->save_context_scratch = code->cur; + mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); + mov_rrdisp(code, options->gen.scratch2, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_W); + + options->gen.save_context = code->cur; + for (int i = 0; i <= Z80_A; i++) + { + int reg; + uint8_t size; + if (i < Z80_I) { + int reg = i /2 + Z80_BC; + size = SZ_W; + + } else { + reg = i; + size = SZ_B; + } + if (options->regs[reg] >= 0) { + mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); + } + } + if (options->regs[Z80_SP] >= 0) { + mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W); + } + 
mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D); + mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D); + mov_rrdisp(code, options->bank_reg, options->gen.context_reg, offsetof(z80_context, bank_reg), SZ_W); + mov_rrdisp(code, options->bank_pointer, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, SZ_PTR); + + options->load_context_scratch = code->cur; + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W); + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch2, SZ_W); + options->gen.load_context = code->cur; + for (int i = 0; i <= Z80_A; i++) + { + int reg; + uint8_t size; + if (i < Z80_I) { + int reg = i /2 + Z80_BC; + size = SZ_W; + + } else { + reg = i; + size = SZ_B; + } + if (options->regs[reg] >= 0) { + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size); + } + } + if (options->regs[Z80_SP] >= 0) { + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W); + } + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D); + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D); + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, bank_reg), options->bank_reg, SZ_W); + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, options->bank_pointer, SZ_PTR); + + options->gen.handle_cycle_limit = code->cur; + cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); + code_ptr no_sync = code->cur+1; + jcc(code, CC_B, no_sync); + mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, pc), SZ_W); + call(code, 
options->save_context_scratch); + pop_r(code, RAX); //return address in read/write func + pop_r(code, RBX); //return address in translated code + sub_ir(code, 5, RAX, SZ_PTR); //adjust return address to point to the call that got us here + mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); + mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); + //restore callee saved registers + pop_r(code, R15) + pop_r(code, R14) + pop_r(code, R13) + pop_r(code, R12) + pop_r(code, RBP) + pop_r(code, RBX) + *no_sync = code->cur - no_sync; + //return to caller of z80_run + retn(code); + + options->gen.read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, NULL); + options->gen.write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); + + options->gen.handle_cycle_limit_int = code->cur; + cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D); + code_ptr skip_int = code->cur+1; + jcc(code, CC_B, skip_int); + //set limit to the cycle limit + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D); + //disable interrupts + move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B); + cycles(&options->gen, 7); + //save return address (in scratch1) to Z80 stack + sub_ir(code, 2, options->regs[Z80_SP], SZ_W); + mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); + //we need to do check_cycles and cycles outside of the write_8 call + //so that the stack has the correct depth if we need to return to C + //for a synchronization + check_cycles(&options->gen); + cycles(&options->gen, 3); + //save word to write before call to write_8_noinc + push_r(code, options->gen.scratch1); + call(code, options->write_8_noinc); + //restore word to write + pop_r(code, options->gen.scratch1); + //write high 
byte to SP+1 + mov_rr(code, options->regs[Z80_SP], options->gen.scratch2, SZ_W); + add_ir(code, 1, options->gen.scratch2, SZ_W); + shr_ir(code, 8, options->gen.scratch1, SZ_W); + check_cycles(&options->gen); + cycles(&options->gen, 3); + call(code, options->write_8_noinc); + //dispose of return address as we'll be jumping somewhere else + pop_r(options->gen.scratch2); + //TODO: Support interrupt mode 0 and 2 + mov_ir(code, 0x38, options->gen.scratch1, SZ_W); + call(code, (code_ptr)z80_native_addr); + jmp_r(code, options->gen.scratch1); } -void init_z80_context(z80_context * context, x86_z80_options * options) +void init_z80_context(z80_context * context, z80_options * options) { memset(context, 0, sizeof(*context)); context->static_code_map = malloc(sizeof(*context->static_code_map)); @@ -1979,9 +2104,9 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha static uint8_t * bp_stub = NULL; uint8_t * native = z80_get_native_address_trans(context, address); uint8_t * start_native = native; - native = mov_ir(native, address, SCRATCH1, SZ_W); + native = mov_ir(native, address, opts->gen.scratch1, SZ_W); if (!bp_stub) { - x86_z80_options * opts = context->options; + z80_options * opts = context->options; uint8_t * dst = opts->cur_code; uint8_t * dst_end = opts->code_end; if (dst_end - dst < 128) { @@ -1999,27 +2124,27 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha //Save context and call breakpoint handler dst = call(dst, (uint8_t *)z80_save_context); - dst = push_r(dst, SCRATCH1); - dst = mov_rr(dst, CONTEXT, RDI, SZ_Q); - dst = mov_rr(dst, SCRATCH1, RSI, SZ_W); + dst = push_r(dst, opts->gen.scratch1); + dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); + dst = mov_rr(dst, opts->gen.scratch1, RSI, SZ_W); dst = call(dst, bp_handler); - dst = mov_rr(dst, RAX, CONTEXT, SZ_Q); + dst = mov_rr(dst, RAX, opts->gen.context_reg, SZ_Q); //Restore context dst = call(dst, (uint8_t *)z80_load_context); - dst = 
pop_r(dst, SCRATCH1); + dst = pop_r(dst, opts->gen.scratch1); //do prologue stuff - dst = cmp_rr(dst, ZCYCLES, ZLIMIT, SZ_D); + dst = cmp_rr(dst, opts->gen.cycles, opts->gen.limit, SZ_D); uint8_t * jmp_off = dst+1; dst = jcc(dst, CC_NC, dst + 7); - dst = pop_r(dst, SCRATCH1); - dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); - dst = push_r(dst, SCRATCH1); + dst = pop_r(dst, opts->gen.scratch1); + dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); + dst = push_r(dst, opts->gen.scratch1); dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); *jmp_off = dst - (jmp_off+1); //jump back to body of translated instruction - dst = pop_r(dst, SCRATCH1); - dst = add_ir(dst, check_int_size - (native-start_native), SCRATCH1, SZ_Q); - dst = jmp_r(dst, SCRATCH1); + dst = pop_r(dst, opts->gen.scratch1); + dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); + dst = jmp_r(dst, opts->gen.scratch1); opts->cur_code = dst; } else { native = call(native, bp_stub); -- cgit v1.2.3 From 3c8d04a6b51184d9856cebd2e445791e451cb56a Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Tue, 16 Dec 2014 01:10:54 -0800 Subject: Fix flags for rra, rrca, rla and rlca. Fix timing for rr, rrc, rl and rlc when using IX or IY. Fix access to I and R registers (R still needs to be made 7-bit though). Fix flags for ld a, i. The fix for access to I fixes PCM playback in Titan Overdrive and music playback in Crackdown. 
--- z80_to_x86.c | 62 ++++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 18 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index dc6cfaf..b529766 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -157,7 +157,7 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o ea->base = opts->regs[Z80_IYL]; dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); } - } else { + } else if(opts->regs[inst->ea_reg] >= 0) { ea->base = opts->regs[inst->ea_reg]; if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { uint8_t other_reg = opts->regs[inst->reg]; @@ -167,6 +167,10 @@ uint8_t * translate_z80_ea(z80inst * inst, x86_ea * ea, uint8_t * dst, x86_z80_o dst = ror_ir(dst, 8, ea->base, SZ_W); } } + } else { + ea->mode = MODE_REG_DISPLACE8; + ea->base = CONTEXT; + ea->disp = offsetof(z80_context, regs) + inst->ea_reg; } break; case Z80_REG_INDIRECT: @@ -390,6 +394,16 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context } else { dst = mov_rdisp8r(dst, src_op.base, src_op.disp, dst_op.base, size); } + if (inst->ea_reg == Z80_I && inst->addr_mode == Z80_REG) { + //ld a, i sets some flags + //TODO: Implement half-carry flag + dst = cmp_ir(dst, 0, dst_op.base, SZ_B); + dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B);; + dst = mov_rdisp8r(dst, CONTEXT, offsetof(z80_context, iff2), SCRATCH1, SZ_B); + dst = mov_rrdisp8(dst, SCRATCH1, CONTEXT, zf_off(ZF_PV), SZ_B); + } dst = z80_save_reg(dst, inst, opts); dst = z80_save_ea(dst, inst, opts); if (inst->addr_mode & Z80_DIR) { @@ -945,10 +959,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); //TODO: Implement 
half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + if (inst->immed) { + //rlca does not set these flags + dst = cmp_ir(dst, 0, dst_op.base, SZ_B); + dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + } if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -977,10 +994,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + if (inst->immed) { + //rla does not set these flags + dst = cmp_ir(dst, 0, dst_op.base, SZ_B); + dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + } if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1008,10 +1028,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + if (inst->immed) { + //rrca does not set these flags + dst = cmp_ir(dst, 0, dst_op.base, SZ_B); + 
dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + } if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -1040,10 +1063,13 @@ uint8_t * translate_z80inst(z80inst * inst, uint8_t * dst, z80_context * context dst = setcc_rdisp8(dst, CC_C, CONTEXT, zf_off(ZF_C)); dst = mov_irdisp8(dst, 0, CONTEXT, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + if (inst->immed) { + //rra does not set these flags + dst = cmp_ir(dst, 0, dst_op.base, SZ_B); + dst = setcc_rdisp8(dst, CC_P, CONTEXT, zf_off(ZF_PV)); + dst = setcc_rdisp8(dst, CC_Z, CONTEXT, zf_off(ZF_Z)); + dst = setcc_rdisp8(dst, CC_S, CONTEXT, zf_off(ZF_S)); + } if (inst->addr_mode != Z80_UNUSED) { dst = z80_save_result(dst, inst); if (src_op.mode != MODE_UNUSED) { @@ -2105,7 +2131,7 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha void zremove_breakpoint(z80_context * context, uint16_t address) { - context->breakpoint_flags[address / sizeof(uint8_t)] &= 1 << (address % sizeof(uint8_t)); + context->breakpoint_flags[address / sizeof(uint8_t)] &= ~(1 << (address % sizeof(uint8_t))); uint8_t * native = z80_get_native_address(context, address); if (native) { z80_check_cycles_int(native, address); -- cgit v1.2.3 From 9dc3feb135af0a8853798bd79cff754cd86dbd0f Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Wed, 17 Dec 2014 09:53:51 -0800 Subject: Get Z80 core back into compileable state --- z80_to_x86.c | 1643 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 816 insertions(+), 827 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 
a331308..ba4fc66 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -54,7 +54,7 @@ uint8_t z80_size(z80inst * inst) return SZ_B; } -void translate_z80_reg(z80inst * inst, x86_ea * ea, z80_options * opts) +void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) { code_info *code = &opts->gen.code; if (inst->reg == Z80_USE_IMMED) { @@ -122,7 +122,7 @@ void z80_save_reg(z80inst * inst, z80_options * opts) } } -void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) +void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) { code_info *code = &opts->gen.code; uint8_t size, reg, areg; @@ -158,16 +158,16 @@ void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t r if (read) { if (modify) { //push_r(code, opts->gen.scratch1); - mov_rrdisp8(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); } if (size == SZ_B) { call(code, opts->read_8); } else { - dst = call(dst, opts->read_16); + call(code, opts->read_16); } if (modify) { //pop_r(code, opts->gen.scratch2); - mov_rdisp8r(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); } } ea->base = opts->gen.scratch1; @@ -177,20 +177,20 @@ void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t r ea->disp = inst->immed; break; case Z80_IMMED_INDIRECT: - dst = mov_ir(dst, inst->immed, areg, SZ_W); + mov_ir(code, inst->immed, areg, SZ_W); size = z80_size(inst); if (read) { /*if (modify) { - dst = push_r(dst, opts->gen.scratch1); + push_r(code, opts->gen.scratch1); }*/ if (size == SZ_B) { - dst = call(dst, (uint8_t *)z80_read_byte); + call(code, (uint8_t *)z80_read_byte); } else { - dst = call(dst, (uint8_t 
*)z80_read_word); + call(code, (uint8_t *)z80_read_word); } if (modify) { - //dst = pop_r(dst, opts->gen.scratch2); - dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_W); + //pop_r(code, opts->gen.scratch2); + mov_ir(code, inst->immed, opts->gen.scratch2, SZ_W); } } ea->base = opts->gen.scratch1; @@ -198,22 +198,22 @@ void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t r case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; - dst = mov_rr(dst, reg, areg, SZ_W); - dst = add_ir(dst, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); + mov_rr(code, reg, areg, SZ_W); + add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); size = z80_size(inst); if (read) { if (modify) { - //dst = push_r(dst, opts->gen.scratch1); - dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); + //push_r(code, opts->gen.scratch1); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); } if (size == SZ_B) { - dst = call(dst, (uint8_t *)z80_read_byte); + call(code, (uint8_t *)z80_read_byte); } else { - dst = call(dst, (uint8_t *)z80_read_word); + call(code, (uint8_t *)z80_read_word); } if (modify) { - //dst = pop_r(dst, opts->gen.scratch2); - dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); + //pop_r(code, opts->gen.scratch2); + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, scratch1), opts->gen.scratch2, SZ_W); } } ea->base = opts->gen.scratch1; @@ -225,32 +225,30 @@ void translate_z80_ea(z80inst * inst, x86_ea * ea, z80_options * opts, uint8_t r fprintf(stderr, "Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F); exit(1); } - return dst; } -uint8_t * z80_save_ea(uint8_t * dst, z80inst * inst, z80_options * opts) +void z80_save_ea(code_info *code, z80inst * inst, 
z80_options * opts) { if ((inst->addr_mode & 0x1F) == Z80_REG) { if (inst->ea_reg == Z80_IYH) { if (inst->reg == Z80_IYL) { - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); - dst = mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); + mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } else { - dst = ror_ir(dst, 8, opts->regs[Z80_IY], SZ_W); + ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); } } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { uint8_t other_reg = opts->regs[inst->reg]; if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix - dst = ror_ir(dst, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W); + ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W); } } } - return dst; } -uint8_t * z80_save_result(uint8_t * dst, z80inst * inst) +void z80_save_result(code_info *code, z80inst * inst) { switch(inst->addr_mode & 0x1f) { @@ -259,12 +257,11 @@ uint8_t * z80_save_result(uint8_t * dst, z80inst * inst) case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: if (z80_size(inst) == SZ_B) { - dst = call(dst, (uint8_t *)z80_write_byte); + call(code, (uint8_t *)z80_write_byte); } else { - dst = call(dst, (uint8_t *)z80_write_word_lowfirst); + call(code, (uint8_t *)z80_write_word_lowfirst); } } - return dst; } enum { @@ -311,13 +308,14 @@ void z80_print_regs_exit(z80_context * context) exit(0); } -uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) +void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) { - uint32_t cycles; - x86_ea src_op, dst_op; + uint32_t num_cycles; + host_ea src_op, dst_op; uint8_t size; z80_options *opts = context->options; - uint8_t * start = opts->code.cur; + uint8_t * start = 
opts->gen.code.cur; + code_info *code = &opts->gen.code; check_cycles_int(&opts->gen, address); switch(inst->op) { @@ -327,243 +325,243 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr { case Z80_REG: case Z80_REG_INDIRECT: - cycles = size == SZ_B ? 4 : 6; + num_cycles = size == SZ_B ? 4 : 6; if (inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { - cycles += 4; + num_cycles += 4; } if (inst->reg == Z80_I || inst->ea_reg == Z80_I) { - cycles += 5; + num_cycles += 5; } break; case Z80_IMMED: - cycles = size == SZ_B ? 7 : 10; + num_cycles = size == SZ_B ? 7 : 10; break; case Z80_IMMED_INDIRECT: - cycles = 10; + num_cycles = 10; break; case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: - cycles = 16; + num_cycles = 16; break; } if ((inst->reg >= Z80_IXL && inst->reg <= Z80_IYH) || inst->reg == Z80_IX || inst->reg == Z80_IY) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); + cycles(&opts->gen, num_cycles); if (inst->addr_mode & Z80_DIR) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, DONT_READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); + translate_z80_ea(inst, &dst_op, opts, DONT_READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); } else { - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); + translate_z80_reg(inst, &dst_op, opts); } if (src_op.mode == MODE_REG_DIRECT) { if(dst_op.mode == MODE_REG_DISPLACE8) { - dst = mov_rrdisp8(dst, src_op.base, dst_op.base, dst_op.disp, size); + mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, size); } else { - dst = mov_rr(dst, src_op.base, dst_op.base, size); + mov_rr(code, src_op.base, dst_op.base, size); } } else if(src_op.mode == MODE_IMMED) { - dst = mov_ir(dst, src_op.disp, dst_op.base, size); + mov_ir(code, src_op.disp, dst_op.base, size); } else { - dst = mov_rdisp8r(dst, src_op.base, src_op.disp, 
dst_op.base, size); + mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); if (inst->addr_mode & Z80_DIR) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); } break; case Z80_PUSH: - dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); - dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); + cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 9 : 5); + sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { - dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); - dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); - dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 2, opts->gen.scratch1, SZ_B); - dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); - dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B); - dst = shl_ir(dst, 1, opts->gen.scratch1, SZ_B); - dst = or_rdisp8r(dst, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); + mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); + shl_ir(code, 8, opts->gen.scratch1, SZ_W); + mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); + shl_ir(code, 1, opts->gen.scratch1, SZ_B); + or_rdispr(code, opts->gen.context_reg, zf_off(ZF_Z), opts->gen.scratch1, SZ_B); + shl_ir(code, 2, opts->gen.scratch1, SZ_B); + or_rdispr(code, opts->gen.context_reg, zf_off(ZF_H), opts->gen.scratch1, SZ_B); + shl_ir(code, 2, 
opts->gen.scratch1, SZ_B); + or_rdispr(code, opts->gen.context_reg, zf_off(ZF_PV), opts->gen.scratch1, SZ_B); + shl_ir(code, 1, opts->gen.scratch1, SZ_B); + or_rdispr(code, opts->gen.context_reg, zf_off(ZF_N), opts->gen.scratch1, SZ_B); + shl_ir(code, 1, opts->gen.scratch1, SZ_B); + or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); } else { - dst = translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, src_op.base, opts->gen.scratch1, SZ_W); + translate_z80_reg(inst, &src_op, opts); + mov_rr(code, src_op.base, opts->gen.scratch1, SZ_W); } - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_word_highfirst); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_word_highfirst); //no call to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair break; case Z80_POP: - dst = cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_word); - dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); + cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 
8 : 4); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_word); + add_ir(code, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { - dst = bt_ir(dst, 0, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = bt_ir(dst, 1, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_N)); - dst = bt_ir(dst, 2, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_PV)); - dst = bt_ir(dst, 4, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_H)); - dst = bt_ir(dst, 6, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); - dst = bt_ir(dst, 7, opts->gen.scratch1, SZ_W); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_S)); - dst = shr_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = mov_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); + bt_ir(code, 0, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + bt_ir(code, 1, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_N)); + bt_ir(code, 2, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_PV)); + bt_ir(code, 4, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_H)); + bt_ir(code, 6, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_Z)); + bt_ir(code, 7, opts->gen.scratch1, SZ_W); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S)); + shr_ir(code, 8, opts->gen.scratch1, SZ_W); + mov_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); } else { - dst = translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, opts->gen.scratch1, src_op.base, SZ_W); + translate_z80_reg(inst, &src_op, opts); + mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W); } //no call 
to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair break; case Z80_EX: if (inst->addr_mode == Z80_REG || inst->reg == Z80_HL) { - cycles = 4; + num_cycles = 4; } else { - cycles = 8; + num_cycles = 8; } - dst = cycles(&opts->gen, cycles); + cycles(&opts->gen, num_cycles); if (inst->addr_mode == Z80_REG) { if(inst->reg == Z80_AF) { - dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); - dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); + mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); //Flags are currently word aligned, so we can move //them efficiently a word at a time for (int f = ZF_C; f < ZF_NUM; f+=2) { - dst = mov_rdisp8r(dst, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); + mov_rdispr(code, opts->gen.context_reg, zf_off(f), opts->gen.scratch1, SZ_W); + mov_rdispr(code, opts->gen.context_reg, zaf_off(f), opts->gen.scratch2, SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zaf_off(f), SZ_W); + mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); } } else { - dst = xchg_rr(dst, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); + xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); } } else { - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = xchg_rr(dst, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); - dst = 
mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); - dst = cycles(&opts->gen, 1); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); + cycles(&opts->gen, 1); uint8_t high_reg = z80_high_reg(inst->reg); uint8_t use_reg; //even though some of the upper halves can be used directly //the limitations on mixing *H regs with the REX prefix //prevent us from taking advantage of it use_reg = opts->regs[inst->reg]; - dst = ror_ir(dst, 8, use_reg, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = add_ir(dst, 1, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = xchg_rr(dst, use_reg, opts->gen.scratch1, SZ_B); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = add_ir(dst, 1, opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); + ror_ir(code, 8, use_reg, SZ_W); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + add_ir(code, 1, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + add_ir(code, 1, opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); //restore reg to normal rotation - dst = ror_ir(dst, 8, use_reg, SZ_W); - dst = cycles(&opts->gen, 2); + ror_ir(code, 8, use_reg, SZ_W); + cycles(&opts->gen, 2); } break; case Z80_EXX: - dst = cycles(&opts->gen, 4); - dst = mov_rr(dst, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_L), 
opts->regs[Z80_HL], SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); - dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); - dst = mov_rdisp8r(dst, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); + cycles(&opts->gen, 4); + mov_rr(code, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); + mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); + mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); break; case Z80_LDI: { - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); - dst = cycles(&opts->gen, 2); - dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); - dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); + cycles(&opts->gen, 2); + add_ir(code, 1, opts->regs[Z80_DE], SZ_W); + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); + sub_ir(code, 
1, opts->regs[Z80_BC], SZ_W); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); break; } case Z80_LDIR: { - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); - dst = add_ir(dst, 1, opts->regs[Z80_DE], SZ_W); - dst = add_ir(dst, 1, opts->regs[Z80_HL], SZ_W); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); + add_ir(code, 1, opts->regs[Z80_DE], SZ_W); + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); - uint8_t * cont = dst+1; - dst = jcc(dst, CC_Z, dst+2); - dst = cycles(&opts->gen, 7); + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + uint8_t * cont = code->cur+1; + jcc(code, CC_Z, code->cur+2); + cycles(&opts->gen, 7); //TODO: Figure out what the flag state should be here //TODO: Figure out whether an interrupt can interrupt this - dst = jmp(dst, start); - *cont = dst - (cont + 1); - dst = cycles(&opts->gen, 2); + jmp(code, start); + *cont = code->cur - (cont + 1); + cycles(&opts->gen, 2); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; } case Z80_LDD: { - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], 
opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); - dst = cycles(&opts->gen, 2); - dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); + cycles(&opts->gen, 2); + sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); + sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); break; } case Z80_LDDR: { - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); - dst = mov_rr(dst, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); - dst = sub_ir(dst, 1, opts->regs[Z80_DE], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_HL], SZ_W); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); + mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_byte); + sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); + sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); - dst = sub_ir(dst, 1, opts->regs[Z80_BC], SZ_W); - uint8_t * cont = dst+1; - dst = jcc(dst, CC_Z, dst+2); - dst = cycles(&opts->gen, 7); + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + uint8_t * cont = code->cur+1; + jcc(code, CC_Z, 
code->cur+2); + cycles(&opts->gen, 7); //TODO: Figure out what the flag state should be here //TODO: Figure out whether an interrupt can interrupt this - dst = jmp(dst, start); - *cont = dst - (cont + 1); - dst = cycles(&opts->gen, 2); + jmp(code, start); + *cont = code->cur - (cont + 1); + cycles(&opts->gen, 2); //TODO: Implement half-carry - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; } /*case Z80_CPI: @@ -572,622 +570,622 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr case Z80_CPDR: break;*/ case Z80_ADD: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { - dst = add_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + add_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = add_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + add_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) 
== SZ_B) { - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_ADC: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); - dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); + bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); if (src_op.mode == MODE_REG_DIRECT) { - dst = adc_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + adc_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = adc_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + adc_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, 
CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_SUB: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { - dst = sub_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + sub_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = sub_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + sub_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, 
zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_SBC: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); - dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); + bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); if (src_op.mode == MODE_REG_DIRECT) { - dst = sbb_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + sbb_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = sbb_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + z80_save_reg(inst, opts); + 
z80_save_ea(code, inst, opts); break; case Z80_AND: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { - dst = and_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + and_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = and_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + and_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_OR: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles 
+= 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { - dst = or_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + or_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = or_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + or_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_XOR: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } else if(z80_size(inst) == SZ_W) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst 
= translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { - dst = xor_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + xor_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = xor_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + xor_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (z80_size(inst) == SZ_B) { - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_CP: - cycles = 4; + num_cycles = 4; if (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 12; + num_cycles += 12; } else if(inst->addr_mode == Z80_IMMED) { - cycles += 3; + num_cycles += 3; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if 
(src_op.mode == MODE_REG_DIRECT) { - dst = cmp_rr(dst, src_op.base, dst_op.base, z80_size(inst)); + cmp_rr(code, src_op.base, dst_op.base, z80_size(inst)); } else { - dst = cmp_ir(dst, src_op.disp, dst_op.base, z80_size(inst)); + cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst)); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); break; case Z80_INC: - cycles = 4; + num_cycles = 4; if (inst->reg == Z80_IX || inst->reg == Z80_IY) { - cycles += 6; + num_cycles += 6; } else if(z80_size(inst) == SZ_W) { - cycles += 2; + num_cycles += 2; } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); if (dst_op.mode == MODE_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); } - dst = add_ir(dst, 1, dst_op.base, z80_size(inst)); + add_ir(code, 1, dst_op.base, z80_size(inst)); if 
(z80_size(inst) == SZ_B) { - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); - dst = z80_save_result(dst, inst); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); + z80_save_result(code, inst); break; case Z80_DEC: - cycles = 4; + num_cycles = 4; if (inst->reg == Z80_IX || inst->reg == Z80_IY) { - cycles += 6; + num_cycles += 6; } else if(z80_size(inst) == SZ_W) { - cycles += 2; + num_cycles += 2; } else if(inst->reg == Z80_IXH || inst->reg == Z80_IXL || inst->reg == Z80_IYH || inst->reg == Z80_IYL || inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, cycles); - dst = translate_z80_reg(inst, &dst_op, dst, opts); + cycles(&opts->gen, num_cycles); + translate_z80_reg(inst, &dst_op, opts); if (dst_op.mode == MODE_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); } - dst = sub_ir(dst, 1, dst_op.base, z80_size(inst)); + sub_ir(code, 1, dst_op.base, z80_size(inst)); if (z80_size(inst) == SZ_B) { - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, 
zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } - dst = z80_save_reg(dst, inst, opts); - dst = z80_save_ea(dst, inst, opts); - dst = z80_save_result(dst, inst); + z80_save_reg(inst, opts); + z80_save_ea(code, inst, opts); + z80_save_result(code, inst); break; //case Z80_DAA: case Z80_CPL: - dst = cycles(&opts->gen, 4); - dst = not_r(dst, opts->regs[Z80_A], SZ_B); + cycles(&opts->gen, 4); + not_r(code, opts->regs[Z80_A], SZ_B); //TODO: Implement half-carry flag - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); break; case Z80_NEG: - dst = cycles(&opts->gen, 8); - dst = neg_r(dst, opts->regs[Z80_A], SZ_B); + cycles(&opts->gen, 8); + neg_r(code, opts->regs[Z80_A], SZ_B); //TODO: Implement half-carry flag - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = setcc_rdisp8(dst, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + setcc_rdisp(code, CC_O, opts->gen.context_reg, zf_off(ZF_PV)); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); break; case Z80_CCF: - dst = cycles(&opts->gen, 4); - dst = xor_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + cycles(&opts->gen, 4); + xor_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + 
mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag break; case Z80_SCF: - dst = cycles(&opts->gen, 4); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + cycles(&opts->gen, 4); + mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag break; case Z80_NOP: if (inst->immed == 42) { - dst = call(dst, (uint8_t *)z80_save_context); - dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); - dst = jmp(dst, (uint8_t *)z80_print_regs_exit); + call(code, (uint8_t *)z80_save_context); + mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); + jmp(code, (uint8_t *)z80_print_regs_exit); } else { - dst = cycles(&opts->gen, 4 * inst->immed); + cycles(&opts->gen, 4 * inst->immed); } break; case Z80_HALT: - dst = cycles(&opts->gen, 4); - dst = mov_ir(dst, address, opts->gen.scratch1, SZ_W); - uint8_t * call_inst = dst; - dst = call(dst, (uint8_t *)z80_halt); - dst = jmp(dst, call_inst); + cycles(&opts->gen, 4); + mov_ir(code, address, opts->gen.scratch1, SZ_W); + uint8_t * call_inst = code->cur; + call(code, (uint8_t *)z80_halt); + jmp(code, call_inst); break; case Z80_DI: - dst = cycles(&opts->gen, 4); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); - dst = mov_rdisp8r(dst, opts->gen.context_reg, offsetof(z80_context, sync_cycle), opts->gen.limit, SZ_D); - dst = mov_irdisp8(dst, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D); + cycles(&opts->gen, 4); + mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, sync_cycle), 
opts->gen.limit, SZ_D); + mov_irdisp(code, 0xFFFFFFFF, opts->gen.context_reg, offsetof(z80_context, int_cycle), SZ_D); break; case Z80_EI: - dst = cycles(&opts->gen, 4); - dst = mov_rrdisp32(dst, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); - dst = mov_irdisp8(dst, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); + cycles(&opts->gen, 4); + mov_rrdisp(code, opts->gen.cycles, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); + mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles - dst = add_irdisp32(dst, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); - dst = call(dst, (uint8_t *)z80_do_sync); + add_irdisp(code, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); + call(code, (uint8_t *)z80_do_sync); break; case Z80_IM: - dst = cycles(&opts->gen, 4); - dst = mov_irdisp8(dst, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B); + cycles(&opts->gen, 4); + mov_irdisp(code, inst->immed, opts->gen.context_reg, offsetof(z80_context, im), SZ_B); break; case Z80_RLC: - cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); - dst = cycles(&opts->gen, cycles); + num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8); + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = rol_ir(dst, 1, dst_op.base, SZ_B); + rol_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_RL: - cycles = inst->immed == 0 ? 
4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); - dst = cycles(&opts->gen, cycles); + num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - dst = rcl_ir(dst, 1, dst_op.base, SZ_B); + bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + rcl_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != 
Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_RRC: - cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); - dst = cycles(&opts->gen, cycles); + num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = ror_ir(dst, 1, dst_op.base, SZ_B); + ror_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, 
opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_RR: - cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); - dst = cycles(&opts->gen, cycles); + num_cycles = inst->immed == 0 ? 4 : (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8); + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = bt_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - dst = rcr_ir(dst, 1, dst_op.base, SZ_B); + bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + rcr_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = 
cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_SLA: case Z80_SLL: - cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; - dst = cycles(&opts->gen, cycles); + num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 
16 : 8; + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = shl_ir(dst, 1, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + shl_ir(code, 1, dst_op.base, SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); if (inst->op == Z80_SLL) { - dst = or_ir(dst, 1, dst_op.base, SZ_B); + or_ir(code, 1, dst_op.base, SZ_B); } if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case 
Z80_SRA: - cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; - dst = cycles(&opts->gen, cycles); + num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = sar_ir(dst, 1, dst_op.base, SZ_B); + sar_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, 
opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_SRL: - cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; - dst = cycles(&opts->gen, cycles); + num_cycles = inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE ? 16 : 8; + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_UNUSED) { - dst = translate_z80_ea(inst, &dst_op, dst, opts, READ, MODIFY); - dst = translate_z80_reg(inst, &src_op, dst, opts); //For IX/IY variants that also write to a register - dst = cycles(&opts->gen, 1); + translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); + translate_z80_reg(inst, &src_op, opts); //For IX/IY variants that also write to a register + cycles(&opts->gen, 1); } else { src_op.mode = MODE_UNUSED; - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } - dst = shr_ir(dst, 1, dst_op.base, SZ_B); + shr_ir(code, 1, dst_op.base, SZ_B); if (src_op.mode != MODE_UNUSED) { - dst = mov_rr(dst, dst_op.base, src_op.base, SZ_B); + mov_rr(code, dst_op.base, src_op.base, SZ_B); } - dst = setcc_rdisp8(dst, CC_C, opts->gen.context_reg, zf_off(ZF_C)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - dst = cmp_ir(dst, 0, dst_op.base, SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - dst = 
z80_save_result(dst, inst); + z80_save_result(code, inst); if (src_op.mode != MODE_UNUSED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } else { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } break; case Z80_RLD: - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x24, A = 0x31 - dst = mov_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); - dst = shl_ir(dst, 4, opts->gen.scratch1, SZ_W); - dst = and_ir(dst, 0xF, opts->gen.scratch2, SZ_W); - dst = and_ir(dst, 0xFFF, opts->gen.scratch1, SZ_W); - dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); - dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); + shl_ir(code, 4, opts->gen.scratch1, SZ_W); + and_ir(code, 0xF, opts->gen.scratch2, SZ_W); + and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W); + and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B); + or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W); //opts->gen.scratch1 = 0x0124 - dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = cycles(&opts->gen, 4); - dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + cycles(&opts->gen, 4); + or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); //set flags //TODO: Implement half-carry flag - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, 
zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); - dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_write_byte); break; case Z80_RRD: - dst = cycles(&opts->gen, 8); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_byte); + cycles(&opts->gen, 8); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_byte); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x41, A = 0x32 - dst = movzx_rr(dst, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); - dst = ror_ir(dst, 4, opts->gen.scratch1, SZ_W); - dst = shl_ir(dst, 4, opts->gen.scratch2, SZ_W); - dst = and_ir(dst, 0xF00F, opts->gen.scratch1, SZ_W); - dst = and_ir(dst, 0xF0, opts->regs[Z80_A], SZ_B); + movzx_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); + ror_ir(code, 4, opts->gen.scratch1, SZ_W); + shl_ir(code, 4, opts->gen.scratch2, SZ_W); + and_ir(code, 0xF00F, opts->gen.scratch1, SZ_W); + and_ir(code, 0xF0, opts->regs[Z80_A], SZ_B); //opts->gen.scratch1 = 0x2001 //opts->gen.scratch2 = 0x0040 - dst = or_rr(dst, opts->gen.scratch2, opts->gen.scratch1, SZ_W); + or_rr(code, opts->gen.scratch2, opts->gen.scratch1, SZ_W); //opts->gen.scratch1 = 0x2041 - dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = cycles(&opts->gen, 4); - dst = shr_ir(dst, 4, opts->gen.scratch1, SZ_B); - dst = or_rr(dst, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + cycles(&opts->gen, 4); + shr_ir(code, 4, opts->gen.scratch1, SZ_B); + or_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); //set flags //TODO: Implement half-carry flag - dst = 
mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); - dst = setcc_rdisp8(dst, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - dst = setcc_rdisp8(dst, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - dst = mov_rr(dst, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); - dst = ror_ir(dst, 8, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_write_byte); + mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + ror_ir(code, 8, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_write_byte); break; case Z80_BIT: { - cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; - dst = cycles(&opts->gen, cycles); + num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; + cycles(&opts->gen, num_cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1196,27 +1194,27 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr } else { size = SZ_B; bit = inst->immed; - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, DONT_MODIFY); + translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 - dst = cycles(&opts->gen, 1); + cycles(&opts->gen, 1); } - dst = bt_ir(dst, bit, src_op.base, size); - dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); - dst = setcc_rdisp8(dst, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); + bt_ir(code, bit, src_op.base, size); + setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); if (inst->immed == 7) { - dst = cmp_ir(dst, 0, src_op.base, size); - dst = setcc_rdisp8(dst, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, src_op.base, size); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } else { - dst = mov_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); + mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); } break; } case Z80_SET: { - cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; - dst = cycles(&opts->gen, cycles); + num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; + cycles(&opts->gen, num_cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1225,40 +1223,40 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr } else { size = SZ_B; bit = inst->immed; - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY); + translate_z80_ea(inst, &src_op, opts, READ, MODIFY); } if (inst->reg != Z80_USE_IMMED) { - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 - dst = cycles(&opts->gen, 1); + cycles(&opts->gen, 1); } - dst = bts_ir(dst, bit, src_op.base, size); + bts_ir(code, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { if (dst_op.base >= R8) { - dst = ror_ir(dst, 8, src_op.base, SZ_W); - dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); - dst = ror_ir(dst, 8, src_op.base, SZ_W); + ror_ir(code, 8, src_op.base, SZ_W); + mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); + ror_ir(code, 8, src_op.base, SZ_W); } else { - dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B); + mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); } } else { - dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B); + mov_rr(code, src_op.base, dst_op.base, SZ_B); } } if ((inst->addr_mode & 0x1F) != Z80_REG) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (inst->reg != Z80_USE_IMMED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } break; } case Z80_RES: { - cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 8 : 16; - dst = cycles(&opts->gen, cycles); + num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; + cycles(&opts->gen, num_cycles); uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; @@ -1267,361 +1265,361 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr } else { size = SZ_B; bit = inst->immed; - dst = translate_z80_ea(inst, &src_op, dst, opts, READ, MODIFY); + translate_z80_ea(inst, &src_op, opts, READ, MODIFY); } if (inst->reg != Z80_USE_IMMED) { - dst = translate_z80_reg(inst, &dst_op, dst, opts); + translate_z80_reg(inst, &dst_op, opts); } if (inst->addr_mode != Z80_REG) { //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 - dst = cycles(&opts->gen, 1); + cycles(&opts->gen, 1); } - dst = btr_ir(dst, bit, src_op.base, size); + btr_ir(code, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { if (dst_op.base >= R8) { - dst = ror_ir(dst, 8, src_op.base, SZ_W); - dst = mov_rr(dst, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); - dst = ror_ir(dst, 8, src_op.base, SZ_W); + ror_ir(code, 8, src_op.base, SZ_W); + mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); + ror_ir(code, 8, src_op.base, SZ_W); } else { - dst = mov_rr(dst, opts->regs[inst->ea_reg], dst_op.base, SZ_B); + mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); } } else { - dst = mov_rr(dst, src_op.base, dst_op.base, SZ_B); + mov_rr(code, src_op.base, dst_op.base, SZ_B); } } if (inst->addr_mode != Z80_REG) { - dst = z80_save_result(dst, inst); + z80_save_result(code, inst); if (inst->reg != Z80_USE_IMMED) { - dst = z80_save_reg(dst, inst, opts); + z80_save_reg(inst, opts); } } break; } case Z80_JP: { - cycles = 4; + num_cycles = 4; if (inst->addr_mode != Z80_REG_INDIRECT) { - cycles += 6; + num_cycles += 6; } else if(inst->ea_reg == Z80_IX || inst->ea_reg == Z80_IY) { - cycles += 4; + num_cycles += 4; } - dst = cycles(&opts->gen, 
cycles); + cycles(&opts->gen, num_cycles); if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, inst->immed); + code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { if (inst->addr_mode == Z80_REG_INDIRECT) { - dst = mov_rr(dst, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); } else { - dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); + mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); } - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } break; } case Z80_JPCC: { - dst = cycles(&opts->gen, 7);//T States: 4,3 + cycles(&opts->gen, 7);//T States: 4,3 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } - uint8_t *no_jump_off = dst+1; - 
dst = jcc(dst, cond, dst+2); - dst = cycles(&opts->gen, 5);//T States: 5 + uint8_t *no_jump_off = code->cur+1; + jcc(code, cond, code->cur+2); + cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = inst->immed; if (dest_addr < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, dest_addr); + code_ptr call_dst = z80_get_native_address(context, dest_addr); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { - dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } - *no_jump_off = dst - (no_jump_off+1); + *no_jump_off = code->cur - (no_jump_off+1); break; } case Z80_JR: { - dst = cycles(&opts->gen, 12);//T States: 4,3,5 + cycles(&opts->gen, 12);//T States: 4,3,5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, dest_addr); + code_ptr call_dst = z80_get_native_address(context, dest_addr); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { - dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } break; } case Z80_JRCC: { 
- dst = cycles(&opts->gen, 7);//T States: 4,3 + cycles(&opts->gen, 7);//T States: 4,3 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; } - uint8_t *no_jump_off = dst+1; - dst = jcc(dst, cond, dst+2); - dst = cycles(&opts->gen, 5);//T States: 5 + uint8_t *no_jump_off = code->cur+1; + jcc(code, cond, code->cur+2); + cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, dest_addr); + code_ptr call_dst = z80_get_native_address(context, dest_addr); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { - dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } - *no_jump_off = dst - (no_jump_off+1); + *no_jump_off = code->cur - (no_jump_off+1); break; } case Z80_DJNZ: - dst = cycles(&opts->gen, 8);//T States: 5,3 - dst = sub_ir(dst, 1, opts->regs[Z80_B], SZ_B); - uint8_t *no_jump_off = dst+1; - dst = jcc(dst, CC_Z, dst+2); - dst = cycles(&opts->gen, 5);//T States: 5 + cycles(&opts->gen, 8);//T States: 5,3 + sub_ir(code, 1, opts->regs[Z80_B], SZ_B); + uint8_t *no_jump_off = code->cur+1; + jcc(code, CC_Z, code->cur+2); + 
cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; if (dest_addr < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, dest_addr); + code_ptr call_dst = z80_get_native_address(context, dest_addr); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, dest_addr, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { - dst = mov_ir(dst, dest_addr, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } - *no_jump_off = dst - (no_jump_off+1); + *no_jump_off = code->cur - (no_jump_off+1); break; case Z80_CALL: { - dst = cycles(&opts->gen, 11);//T States: 4,3,4 - dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + cycles(&opts->gen, 11);//T States: 4,3,4 + sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); + mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, inst->immed); + code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } 
else { - dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } break; } case Z80_CALLCC: - dst = cycles(&opts->gen, 10);//T States: 4,3,3 (false case) + cycles(&opts->gen, 10);//T States: 4,3,3 (false case) uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } - uint8_t *no_call_off = dst+1; - dst = jcc(dst, cond, dst+2); - dst = cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case - dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 3, opts->gen.scratch1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + uint8_t *no_call_off = code->cur+1; + jcc(code, cond, code->cur+2); + cycles(&opts->gen, 1);//Last of the above T states takes an extra cycle in the true case + sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); + mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + call(code, (uint8_t 
*)z80_write_word_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { - uint8_t * call_dst = z80_get_native_address(context, inst->immed); + code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); } else { - dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); } - *no_call_off = dst - (no_call_off+1); + *no_call_off = code->cur - (no_call_off+1); break; case Z80_RET: - dst = cycles(&opts->gen, 4);//T States: 4 - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 - dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + cycles(&opts->gen, 4);//T States: 4 + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + add_ir(code, 2, opts->regs[Z80_SP], SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); break; case Z80_RETCC: { - dst = cycles(&opts->gen, 5);//T States: 5 + cycles(&opts->gen, 5);//T States: 5 uint8_t cond = CC_Z; switch (inst->reg) { case Z80_CC_NZ: cond = CC_NZ; case Z80_CC_Z: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_Z), SZ_B); break; case Z80_CC_NC: cond = CC_NZ; case Z80_CC_C: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); + cmp_irdisp(code, 0, 
opts->gen.context_reg, zf_off(ZF_C), SZ_B); break; case Z80_CC_PO: cond = CC_NZ; case Z80_CC_PE: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); break; case Z80_CC_P: cond = CC_NZ; case Z80_CC_M: - dst = cmp_irdisp8(dst, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); + cmp_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); break; } - uint8_t *no_call_off = dst+1; - dst = jcc(dst, cond, dst+2); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 - dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); - *no_call_off = dst - (no_call_off+1); + uint8_t *no_call_off = code->cur+1; + jcc(code, cond, code->cur+2); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + add_ir(code, 2, opts->regs[Z80_SP], SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); + *no_call_off = code->cur - (no_call_off+1); break; } case Z80_RETI: //For some systems, this may need a callback for signalling interrupt routine completion - dst = cycles(&opts->gen, 8);//T States: 4, 4 - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 - dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + cycles(&opts->gen, 8);//T States: 4, 4 + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + add_ir(code, 2, opts->regs[Z80_SP], SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); break; case Z80_RETN: - dst = cycles(&opts->gen, 8);//T States: 4, 4 - dst = mov_rdisp8r(dst, opts->gen.context_reg, 
offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - dst = mov_rrdisp8(dst, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); - dst = call(dst, (uint8_t *)z80_read_word);//T STates: 3, 3 - dst = add_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = call(dst, (uint8_t *)z80_native_addr); - dst = jmp_r(dst, opts->gen.scratch1); + cycles(&opts->gen, 8);//T States: 4, 4 + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); + mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + add_ir(code, 2, opts->regs[Z80_SP], SZ_W); + call(code, (uint8_t *)z80_native_addr); + jmp_r(code, opts->gen.scratch1); break; case Z80_RST: { //RST is basically CALL to an address in page 0 - dst = cycles(&opts->gen, 5);//T States: 5 - dst = sub_ir(dst, 2, opts->regs[Z80_SP], SZ_W); - dst = mov_ir(dst, address + 1, opts->gen.scratch1, SZ_W); - dst = mov_rr(dst, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - dst = call(dst, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 - uint8_t * call_dst = z80_get_native_address(context, inst->immed); + cycles(&opts->gen, 5);//T States: 5 + sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); + mov_ir(code, address + 1, opts->gen.scratch1, SZ_W); + mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); + call(code, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { - opts->deferred = defer_address(opts->deferred, inst->immed, dst + 1); + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst = dst + 256; + call_dst + 256; } - dst = jmp(dst, call_dst); + jmp(code, call_dst); break; } case 
Z80_IN: - dst = cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 + cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 if (inst->addr_mode == Z80_IMMED_INDIRECT) { - dst = mov_ir(dst, inst->immed, opts->gen.scratch1, SZ_B); + mov_ir(code, inst->immed, opts->gen.scratch1, SZ_B); } else { - dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); + mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); } - dst = call(dst, (uint8_t *)z80_io_read); - translate_z80_reg(inst, &dst_op, dst, opts); - dst = mov_rr(dst, opts->gen.scratch1, dst_op.base, SZ_B); - dst = z80_save_reg(dst, inst, opts); + call(code, (uint8_t *)z80_io_read); + translate_z80_reg(inst, &dst_op, opts); + mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B); + z80_save_reg(inst, opts); break; /*case Z80_INI: case Z80_INIR: case Z80_IND: case Z80_INDR:*/ case Z80_OUT: - dst = cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 + cycles(&opts->gen, inst->reg == Z80_A ? 7 : 8);//T States: 4 3/4 if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { - dst = mov_ir(dst, inst->immed, opts->gen.scratch2, SZ_B); + mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B); } else { - dst = mov_rr(dst, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); + mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); } - translate_z80_reg(inst, &src_op, dst, opts); - dst = mov_rr(dst, dst_op.base, opts->gen.scratch1, SZ_B); - dst = call(dst, (uint8_t *)z80_io_write); - dst = z80_save_reg(dst, inst, opts); + translate_z80_reg(inst, &src_op, opts); + mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B); + call(code, (uint8_t *)z80_io_write); + z80_save_reg(inst, opts); break; /*case Z80_OUTI: case Z80_OTIR: @@ -1637,7 +1635,6 @@ uint8_t * translate_z80inst(z80inst * inst, z80_context * context, uint16_t addr exit(1); } } - return dst; } uint8_t * z80_get_native_address(z80_context * context, uint32_t address) @@ -1666,7 +1663,7 @@ uint8_t z80_get_native_inst_size(z80_options * 
opts, uint32_t address) if (address >= 0x4000) { return 0; } - return opts->ram_inst_sizes[address & 0x1FFF]; + return opts->gen.ram_inst_sizes[0][address & 0x1FFF]; } void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * native_address, uint8_t size, uint8_t native_size) @@ -1677,7 +1674,7 @@ void z80_map_native_address(z80_context * context, uint32_t address, uint8_t * n if (address < 0x4000) { address &= 0x1FFF; map = context->static_code_map; - opts->ram_inst_sizes[address] = native_size; + opts->gen.ram_inst_sizes[0][address] = native_size; context->ram_code_flags[(address & 0x1C00) >> 10] |= 1 << ((address & 0x380) >> 7); context->ram_code_flags[((address + size) & 0x1C00) >> 10] |= 1 << (((address + size) & 0x380) >> 7); } else if (address >= 0x8000) { @@ -1735,10 +1732,12 @@ z80_context * z80_handle_code_write(uint32_t address, z80_context * context) { uint32_t inst_start = z80_get_instruction_start(context->static_code_map, address); if (inst_start != INVALID_INSTRUCTION_START) { - uint8_t * dst = z80_get_native_address(context, inst_start); - dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", dst, inst_start, address); - dst = mov_ir(dst, inst_start, opts->gen.scratch1, SZ_D); - dst = call(dst, (uint8_t *)z80_retrans_stub); + code_ptr dst = z80_get_native_address(context, inst_start); + code_info code = {dst, dst+16}; + z80_options * opts = context->options; + dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code, inst_start, address); + mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D); + call(&code, (uint8_t *)z80_retrans_stub); } return context; } @@ -1759,9 +1758,9 @@ uint8_t * z80_get_native_address_trans(z80_context * context, uint32_t address) void z80_handle_deferred(z80_context * context) { z80_options * opts = context->options; - process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); - if (opts->deferred) { - 
translate_z80_stream(context, opts->deferred->address); + process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address); + if (opts->gen.deferred) { + translate_z80_stream(context, opts->gen.deferred->address); } } @@ -1772,8 +1771,7 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o uint8_t orig_size = z80_get_native_inst_size(opts, address); uint32_t orig = address; address &= 0x1FFF; - uint8_t * dst = opts->cur_code; - uint8_t * dst_end = opts->code_end; + code_info *code = &opts->gen.code; uint8_t *after, *inst = context->mem_pointers[0] + address; z80inst instbuf; dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start); @@ -1787,18 +1785,14 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o } #endif if (orig_size != ZMAX_NATIVE_SIZE) { - if (dst_end - dst < ZMAX_NATIVE_SIZE) { - size_t size = 1024*1024; - dst = alloc_code(&size); - opts->code_end = dst_end = dst + size; - opts->cur_code = dst; - } - deferred_addr * orig_deferred = opts->deferred; - uint8_t * native_end = translate_z80inst(&instbuf, dst, context, address); + code_ptr start = code->cur; + deferred_addr * orig_deferred = opts->gen.deferred; + translate_z80inst(&instbuf, context, address); + /* if ((native_end - dst) <= orig_size) { uint8_t * native_next = z80_get_native_address(context, address + after-inst); if (native_next && ((native_next == orig_start + orig_size) || (orig_size - (native_end - dst)) > 5)) { - remove_deferred_until(&opts->deferred, orig_deferred); + remove_deferred_until(&opts->gen.deferred, orig_deferred); native_end = translate_z80inst(&instbuf, orig_start, context, address); if (native_next == orig_start + orig_size && (native_next-native_end) < 2) { while (native_end < orig_start + orig_size) { @@ -1810,20 +1804,25 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o z80_handle_deferred(context); return orig_start; } 
- } - z80_map_native_address(context, address, dst, after-inst, ZMAX_NATIVE_SIZE); - opts->cur_code = dst+ZMAX_NATIVE_SIZE; - jmp(orig_start, dst); + }*/ + z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE); + code_info tmp_code = {orig_start, orig_start + 16}; + jmp(&tmp_code, start); if (!z80_is_terminal(&instbuf)) { - jmp(native_end, z80_get_native_address_trans(context, address + after-inst)); + jmp(code, z80_get_native_address_trans(context, address + after-inst)); } + code->cur = start + ZMAX_NATIVE_SIZE; z80_handle_deferred(context); - return dst; + return start; } else { - dst = translate_z80inst(&instbuf, orig_start, context, address); + code_info tmp_code = *code; + code->cur = orig_start; + code->last = orig_start + ZMAX_NATIVE_SIZE; + translate_z80inst(&instbuf, context, address); if (!z80_is_terminal(&instbuf)) { - dst = jmp(dst, z80_get_native_address_trans(context, address + after-inst)); + jmp(code, z80_get_native_address_trans(context, address + after-inst)); } + *code = tmp_code; z80_handle_deferred(context); return orig_start; } @@ -1850,24 +1849,14 @@ void translate_z80_stream(z80_context * context, uint32_t address) z80inst inst; dprintf("translating Z80 code at address %X\n", address); do { - if (opts->code_end-opts->cur_code < ZMAX_NATIVE_SIZE) { - if (opts->code_end-opts->cur_code < 5) { - puts("out of code memory, not enough space for jmp to next chunk"); - exit(1); - } - size_t size = 1024*1024; - opts->cur_code = alloc_code(&size); - opts->code_end = opts->cur_code + size; - jmp(opts->cur_code, opts->cur_code); - } if (address > 0x4000 && address < 0x8000) { - opts->cur_code = xor_rr(opts->cur_code, RDI, RDI, SZ_D); - opts->cur_code = call(opts->cur_code, (uint8_t *)exit); + xor_rr(&opts->gen.code, RDI, RDI, SZ_D); + call(&opts->gen.code, (uint8_t *)exit); break; } uint8_t * existing = z80_get_native_address(context, address); if (existing) { - opts->cur_code = jmp(opts->cur_code, existing); + 
jmp(&opts->gen.code, existing); break; } next = z80_decode(encoded, &inst); @@ -1879,9 +1868,9 @@ void translate_z80_stream(z80_context * context, uint32_t address) printf("%X\t%s\n", address, disbuf); } #endif - uint8_t *after = translate_z80inst(&inst, opts->cur_code, context, address); - z80_map_native_address(context, address, opts->cur_code, next-encoded, after - opts->cur_code); - opts->cur_code = after; + code_ptr start = opts->gen.code.cur; + translate_z80inst(&inst, context, address); + z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start); address += next-encoded; if (address > 0xFFFF) { address &= 0xFFFF; @@ -1890,9 +1879,9 @@ void translate_z80_stream(z80_context * context, uint32_t address) encoded = next; } } while (!z80_is_terminal(&inst)); - process_deferred(&opts->deferred, context, (native_addr_func)z80_get_native_address); - if (opts->deferred) { - address = opts->deferred->address; + process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address); + if (opts->gen.deferred) { + address = opts->gen.deferred->address; dprintf("defferred address: %X\n", address); if (address < 0x4000) { encoded = context->mem_pointers[0] + (address & 0x1FFF); @@ -1953,8 +1942,9 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t nu options->gen.native_code_map = malloc(sizeof(native_map_slot)); memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); options->gen.deferred = NULL; - options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000); - memset(options->ram_inst_sizes, 0, sizeof(uint8_t) * 0x2000); + options->gen.ram_inst_sizes = malloc(sizeof(uint8_t) * 0x2000 + sizeof(uint8_t *)); + options->gen.ram_inst_sizes[0] = (uint8_t *)(options->gen.ram_inst_sizes + 1); + memset(options->gen.ram_inst_sizes[0], 0, sizeof(uint8_t) * 0x2000); code_info *code = &options->gen.code; init_code_info(code); @@ -2028,18 +2018,18 @@ void init_x86_z80_opts(z80_options * options, 
memmap_chunk * chunks, uint32_t nu mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); //restore callee saved registers - pop_r(code, R15) - pop_r(code, R14) - pop_r(code, R13) - pop_r(code, R12) - pop_r(code, RBP) - pop_r(code, RBX) + pop_r(code, R15); + pop_r(code, R14); + pop_r(code, R13); + pop_r(code, R12); + pop_r(code, RBP); + pop_r(code, RBX); *no_sync = code->cur - no_sync; //return to caller of z80_run retn(code); - options->gen.read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, NULL); - options->gen.write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); + options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, NULL); + options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); options->gen.handle_cycle_limit_int = code->cur; cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, int_cycle), options->gen.cycles, SZ_D); @@ -2048,8 +2038,8 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t nu //set limit to the cycle limit mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.limit, SZ_D); //disable interrupts - move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B); - move_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B); + mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff1), SZ_B); + mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, iff2), SZ_B); cycles(&options->gen, 7); //save return address (in scratch1) to Z80 stack sub_ir(code, 2, options->regs[Z80_SP], SZ_W); @@ -2072,7 +2062,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t nu cycles(&options->gen, 3); call(code, options->write_8_noinc); //dispose of return address as we'll be jumping somewhere 
else - pop_r(options->gen.scratch2); + pop_r(code, options->gen.scratch2); //TODO: Support interrupt mode 0 and 2 mov_ir(code, 0x38, options->gen.scratch1, SZ_W); call(code, (code_ptr)z80_native_addr); @@ -2102,59 +2092,58 @@ void z80_reset(z80_context * context) void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_handler) { static uint8_t * bp_stub = NULL; + z80_options * opts = context->options; uint8_t * native = z80_get_native_address_trans(context, address); - uint8_t * start_native = native; - native = mov_ir(native, address, opts->gen.scratch1, SZ_W); + code_info tmp_code = {native, native+16}; + mov_ir(&tmp_code, address, opts->gen.scratch1, SZ_W); if (!bp_stub) { - z80_options * opts = context->options; - uint8_t * dst = opts->cur_code; - uint8_t * dst_end = opts->code_end; - if (dst_end - dst < 128) { - size_t size = 1024*1024; - dst = alloc_code(&size); - opts->code_end = dst_end = dst + size; - } - bp_stub = dst; - native = call(native, bp_stub); + code_info *code = &opts->gen.code; + //TODO: do an alloc check here to make sure the prologue length calc works + bp_stub = code->cur; + call(&tmp_code, bp_stub); //Calculate length of prologue - dst = z80_check_cycles_int(dst, address); - int check_int_size = dst-bp_stub; - dst = bp_stub; + check_cycles_int(&opts->gen, address); + int check_int_size = code->cur-bp_stub; + code->cur = bp_stub; //Save context and call breakpoint handler - dst = call(dst, (uint8_t *)z80_save_context); - dst = push_r(dst, opts->gen.scratch1); - dst = mov_rr(dst, opts->gen.context_reg, RDI, SZ_Q); - dst = mov_rr(dst, opts->gen.scratch1, RSI, SZ_W); - dst = call(dst, bp_handler); - dst = mov_rr(dst, RAX, opts->gen.context_reg, SZ_Q); + call(code, (uint8_t *)z80_save_context); + push_r(code, opts->gen.scratch1); + mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); + mov_rr(code, opts->gen.scratch1, RSI, SZ_W); + call(code, bp_handler); + mov_rr(code, RAX, opts->gen.context_reg, SZ_Q); //Restore context - dst 
= call(dst, (uint8_t *)z80_load_context); - dst = pop_r(dst, opts->gen.scratch1); + call(code, (uint8_t *)z80_load_context); + pop_r(code, opts->gen.scratch1); //do prologue stuff - dst = cmp_rr(dst, opts->gen.cycles, opts->gen.limit, SZ_D); - uint8_t * jmp_off = dst+1; - dst = jcc(dst, CC_NC, dst + 7); - dst = pop_r(dst, opts->gen.scratch1); - dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); - dst = push_r(dst, opts->gen.scratch1); - dst = jmp(dst, (uint8_t *)z80_handle_cycle_limit_int); - *jmp_off = dst - (jmp_off+1); + cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); + uint8_t * jmp_off = code->cur+1; + jcc(code, CC_NC, code->cur + 7); + pop_r(code, opts->gen.scratch1); + add_ir(code, check_int_size - (code->cur-native), opts->gen.scratch1, SZ_Q); + push_r(code, opts->gen.scratch1); + jmp(code, (uint8_t *)z80_handle_cycle_limit_int); + *jmp_off = code->cur - (jmp_off+1); //jump back to body of translated instruction - dst = pop_r(dst, opts->gen.scratch1); - dst = add_ir(dst, check_int_size - (native-start_native), opts->gen.scratch1, SZ_Q); - dst = jmp_r(dst, opts->gen.scratch1); - opts->cur_code = dst; + pop_r(code, opts->gen.scratch1); + add_ir(code, check_int_size - (code->cur-native), opts->gen.scratch1, SZ_Q); + jmp_r(code, opts->gen.scratch1); } else { - native = call(native, bp_stub); + call(&tmp_code, bp_stub); } } void zremove_breakpoint(z80_context * context, uint16_t address) { uint8_t * native = z80_get_native_address(context, address); - z80_check_cycles_int(native, address); + z80_options * opts = context->options; + code_info tmp_code = opts->gen.code; + opts->gen.code.cur = native; + opts->gen.code.last = native + 16; + check_cycles_int(&opts->gen, address); + opts->gen.code = tmp_code; } -- cgit v1.2.3 From 4cad512b6d7ac0f7042b90e1029626fb14788bf0 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Wed, 17 Dec 2014 23:03:19 -0800 Subject: Get rest of emulator compiling again with Z80 core enabled --- 
z80_to_x86.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index ba4fc66..6d5f928 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1897,7 +1897,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) } } -void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t num_chunks) +void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) { memset(options, 0, sizeof(*options)); @@ -2069,6 +2069,13 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk * chunks, uint32_t nu jmp_r(code, options->gen.scratch1); } +void * z80_gen_bank_write(uint32_t start_address, void * voptions) +{ + z80_options * options = voptions; + //TODO: Handle writes to bank register + return options; +} + void init_z80_context(z80_context * context, z80_options * options) { memset(context, 0, sizeof(*context)); -- cgit v1.2.3 From 12c73dc400c1b6b61531df4ff0fd1efe4ef7ae12 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Mon, 22 Dec 2014 20:55:10 -0800 Subject: Z80 core is sort of working again --- z80_to_x86.c | 237 ++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 154 insertions(+), 83 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 6d5f928..b556b57 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -28,22 +28,6 @@ #define dprintf #endif -void z80_read_byte(); -void z80_read_word(); -void z80_write_byte(); -void z80_write_word_highfirst(); -void z80_write_word_lowfirst(); -void z80_save_context(); -void z80_native_addr(); -void z80_do_sync(); -void z80_handle_cycle_limit_int(); -void z80_retrans_stub(); -void z80_io_read(); -void z80_io_write(); -void z80_halt(); -void z80_save_context(); -void z80_load_context(); - uint8_t z80_size(z80inst * inst) { uint8_t reg = (inst->reg & 0x1F); @@ -184,9 +168,9 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * 
opts, uint8_t push_r(code, opts->gen.scratch1); }*/ if (size == SZ_B) { - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); } else { - call(code, (uint8_t *)z80_read_word); + call(code, opts->read_16); } if (modify) { //pop_r(code, opts->gen.scratch2); @@ -207,9 +191,9 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, offsetof(z80_context, scratch1), SZ_W); } if (size == SZ_B) { - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); } else { - call(code, (uint8_t *)z80_read_word); + call(code, opts->read_16); } if (modify) { //pop_r(code, opts->gen.scratch2); @@ -248,7 +232,7 @@ void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts) } } -void z80_save_result(code_info *code, z80inst * inst) +void z80_save_result(z80_options *opts, z80inst * inst) { switch(inst->addr_mode & 0x1f) { @@ -257,9 +241,9 @@ void z80_save_result(code_info *code, z80inst * inst) case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: if (z80_size(inst) == SZ_B) { - call(code, (uint8_t *)z80_write_byte); + call(&opts->gen.code, opts->write_8); } else { - call(code, (uint8_t *)z80_write_word_lowfirst); + call(&opts->gen.code, opts->write_16_lowfirst); } } } @@ -369,7 +353,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) z80_save_reg(inst, opts); z80_save_ea(code, inst, opts); if (inst->addr_mode & Z80_DIR) { - z80_save_result(code, inst); + z80_save_result(opts, inst); } break; case Z80_PUSH: @@ -394,14 +378,14 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) mov_rr(code, src_op.base, opts->gen.scratch1, SZ_W); } mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_word_highfirst); + call(code, opts->write_16_highfirst); //no call to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair break; case Z80_POP: 
cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 8 : 4); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_word); + call(code, opts->read_16); add_ir(code, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { @@ -452,10 +436,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } } else { mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); cycles(&opts->gen, 1); uint8_t high_reg = z80_high_reg(inst->reg); uint8_t use_reg; @@ -466,11 +450,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) ror_ir(code, 8, use_reg, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); add_ir(code, 1, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); add_ir(code, 1, opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); //restore reg to normal rotation ror_ir(code, 8, use_reg, SZ_W); cycles(&opts->gen, 2); @@ -491,9 +475,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) case Z80_LDI: { cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); cycles(&opts->gen, 2); add_ir(code, 1, opts->regs[Z80_DE], SZ_W); add_ir(code, 1, opts->regs[Z80_HL], SZ_W); @@ -506,9 +490,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, 
uint16_t address) case Z80_LDIR: { cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); add_ir(code, 1, opts->regs[Z80_DE], SZ_W); add_ir(code, 1, opts->regs[Z80_HL], SZ_W); @@ -529,9 +513,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) case Z80_LDD: { cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); cycles(&opts->gen, 2); sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); @@ -544,9 +528,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) case Z80_LDDR: { cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); @@ -810,7 +794,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } z80_save_reg(inst, opts); z80_save_ea(code, inst, opts); - z80_save_result(code, inst); + z80_save_result(opts, inst); break; case Z80_DEC: num_cycles = 4; @@ -836,7 +820,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } z80_save_reg(inst, opts); z80_save_ea(code, inst, opts); - z80_save_result(code, inst); + z80_save_result(opts, inst); break; //case Z80_DAA: case Z80_CPL: @@ -869,20 +853,30 @@ void translate_z80inst(z80inst * inst, z80_context * context, 
uint16_t address) break; case Z80_NOP: if (inst->immed == 42) { - call(code, (uint8_t *)z80_save_context); + call(code, opts->gen.save_context); mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); jmp(code, (uint8_t *)z80_print_regs_exit); } else { cycles(&opts->gen, 4 * inst->immed); } break; - case Z80_HALT: + case Z80_HALT: { cycles(&opts->gen, 4); mov_ir(code, address, opts->gen.scratch1, SZ_W); uint8_t * call_inst = code->cur; - call(code, (uint8_t *)z80_halt); + mov_rr(code, opts->gen.limit, opts->gen.scratch2, SZ_D); + sub_rr(code, opts->gen.cycles, opts->gen.scratch2, SZ_D); + and_ir(code, 0xFFFFFFFC, opts->gen.scratch2, SZ_D); + add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D); + cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); + code_ptr skip_last = code->cur+1; + jcc(code, CC_B, opts->gen.handle_cycle_limit_int); + cycles(&opts->gen, 4); + *skip_last = code->cur - (skip_last+1); + call(code, opts->gen.handle_cycle_limit_int); jmp(code, call_inst); break; + } case Z80_DI: cycles(&opts->gen, 4); mov_irdisp(code, 0, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); @@ -897,7 +891,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles add_irdisp(code, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); - call(code, (uint8_t *)z80_do_sync); + call(code, opts->do_sync); break; case Z80_IM: cycles(&opts->gen, 4); @@ -926,7 +920,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -958,7 +952,7 @@ void 
translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -989,7 +983,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -1021,7 +1015,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -1056,7 +1050,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -1087,7 +1081,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -1118,7 +1112,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t 
address) setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); if (inst->addr_mode != Z80_UNUSED) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (src_op.mode != MODE_UNUSED) { z80_save_reg(inst, opts); } @@ -1129,7 +1123,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) case Z80_RLD: cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x24, A = 0x31 mov_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); @@ -1151,12 +1145,12 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); ror_ir(code, 8, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); break; case Z80_RRD: cycles(&opts->gen, 8); mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_byte); + call(code, opts->read_8); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x41, A = 0x32 movzx_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); @@ -1181,7 +1175,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); ror_ir(code, 8, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_write_byte); + call(code, opts->write_8); break; case Z80_BIT: { num_cycles = (inst->addr_mode == Z80_IX_DISPLACE || inst->addr_mode == Z80_IY_DISPLACE) ? 
8 : 16; @@ -1247,7 +1241,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } } if ((inst->addr_mode & 0x1F) != Z80_REG) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (inst->reg != Z80_USE_IMMED) { z80_save_reg(inst, opts); } @@ -1289,7 +1283,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } } if (inst->addr_mode != Z80_REG) { - z80_save_result(code, inst); + z80_save_result(opts, inst); if (inst->reg != Z80_USE_IMMED) { z80_save_reg(inst, opts); } @@ -1318,7 +1312,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } else { mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); } - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } break; @@ -1363,7 +1357,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } *no_jump_off = code->cur - (no_jump_off+1); @@ -1382,7 +1376,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } break; @@ -1417,7 +1411,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } *no_jump_off = code->cur - (no_jump_off+1); @@ -1440,7 +1434,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, 
(uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } *no_jump_off = code->cur - (no_jump_off+1); @@ -1450,7 +1444,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + call(code, opts->write_16_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { @@ -1461,7 +1455,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } break; @@ -1498,7 +1492,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + call(code, opts->write_16_highfirst);//T States: 3, 3 if (inst->immed < 0x4000) { code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { @@ -1509,7 +1503,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) jmp(code, call_dst); } else { mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); } *no_call_off = code->cur - (no_call_off+1); @@ -1517,9 +1511,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) case Z80_RET: cycles(&opts->gen, 4);//T States: 4 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - call(code, (uint8_t 
*)z80_read_word);//T STates: 3, 3 + call(code, opts->read_16);//T STates: 3, 3 add_ir(code, 2, opts->regs[Z80_SP], SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); break; case Z80_RETCC: { @@ -1551,9 +1545,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) uint8_t *no_call_off = code->cur+1; jcc(code, cond, code->cur+2); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + call(code, opts->read_16);//T STates: 3, 3 add_ir(code, 2, opts->regs[Z80_SP], SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); *no_call_off = code->cur - (no_call_off+1); break; @@ -1562,9 +1556,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) //For some systems, this may need a callback for signalling interrupt routine completion cycles(&opts->gen, 8);//T States: 4, 4 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); - call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + call(code, opts->read_16);//T STates: 3, 3 add_ir(code, 2, opts->regs[Z80_SP], SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); break; case Z80_RETN: @@ -1572,9 +1566,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch2, SZ_B); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); - call(code, (uint8_t *)z80_read_word);//T STates: 3, 3 + call(code, opts->read_16);//T STates: 3, 3 add_ir(code, 2, opts->regs[Z80_SP], SZ_W); - call(code, (uint8_t *)z80_native_addr); + call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); break; case Z80_RST: { @@ -1583,7 +1577,7 
@@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); mov_ir(code, address + 1, opts->gen.scratch1, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); - call(code, (uint8_t *)z80_write_word_highfirst);//T States: 3, 3 + call(code, opts->write_16_highfirst);//T States: 3, 3 code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); @@ -1600,7 +1594,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } else { mov_rr(code, opts->regs[Z80_C], opts->gen.scratch1, SZ_B); } - call(code, (uint8_t *)z80_io_read); + call(code, opts->read_io); translate_z80_reg(inst, &dst_op, opts); mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B); z80_save_reg(inst, opts); @@ -1618,7 +1612,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) } translate_z80_reg(inst, &src_op, opts); mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B); - call(code, (uint8_t *)z80_io_write); + call(code, opts->write_io); z80_save_reg(inst, opts); break; /*case Z80_OUTI: @@ -1737,7 +1731,7 @@ z80_context * z80_handle_code_write(uint32_t address, z80_context * context) z80_options * opts = context->options; dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code, inst_start, address); mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D); - call(&code, (uint8_t *)z80_retrans_stub); + call(&code, opts->retrans_stub); } return context; } @@ -1959,8 +1953,9 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 int reg; uint8_t size; if (i < Z80_I) { - int reg = i /2 + Z80_BC; + reg = i /2 + Z80_BC; size = SZ_W; + i++; } else { reg = i; @@ -1977,6 +1972,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, 
offsetof(z80_context, current_cycle), SZ_D); mov_rrdisp(code, options->bank_reg, options->gen.context_reg, offsetof(z80_context, bank_reg), SZ_W); mov_rrdisp(code, options->bank_pointer, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, SZ_PTR); + retn(code); options->load_context_scratch = code->cur; mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch1), options->gen.scratch1, SZ_W); @@ -2005,6 +2001,18 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D); mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, bank_reg), options->bank_reg, SZ_W); mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, options->bank_pointer, SZ_PTR); + retn(code); + + options->native_addr = code->cur; + call(code, options->gen.save_context); + push_r(code, options->gen.context_reg); + mov_rr(code, options->gen.context_reg, RDI, SZ_PTR); + movzx_rr(code, options->gen.scratch1, RSI, SZ_W, SZ_D); + call(code, (code_ptr)z80_get_native_address_trans); + mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); + pop_r(code, options->gen.context_reg); + call(code, options->gen.load_context); + retn(code); options->gen.handle_cycle_limit = code->cur; cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); @@ -2065,8 +2073,70 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 pop_r(code, options->gen.scratch2); //TODO: Support interrupt mode 0 and 2 mov_ir(code, 0x38, options->gen.scratch1, SZ_W); - call(code, (code_ptr)z80_native_addr); + call(code, options->native_addr); + jmp_r(code, options->gen.scratch1); + *skip_int = code->cur - (skip_int+1); + cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); + code_ptr 
skip_sync = code->cur + 1; + jcc(code, CC_B, skip_sync); + options->do_sync = code->cur; + call(code, options->gen.save_context); + pop_rind(code, options->gen.context_reg); + //restore callee saved registers + pop_r(code, R15); + pop_r(code, R14); + pop_r(code, R13); + pop_r(code, R12); + pop_r(code, RBP); + pop_r(code, RBX); + //return to caller of z80_run + *skip_sync = code->cur - (skip_sync+1); + retn(code); + + options->read_io = code->cur; + check_cycles(&options->gen); + cycles(&options->gen, 4); + //Genesis has no IO hardware and always returns FF + //eventually this should use a second memory map array + mov_ir(code, 0xFF, options->gen.scratch1, SZ_B); + retn(code); + + options->write_io = code->cur; + check_cycles(&options->gen); + cycles(&options->gen, 4); + retn(code); + + options->retrans_stub = code->cur; + //pop return address + pop_r(code, options->gen.scratch2); + call(code, options->gen.save_context); + //adjust pointer before move and call instructions that got us here + sub_ir(code, 11, options->gen.scratch2, SZ_PTR); + mov_rr(code, options->gen.scratch1, RDI, SZ_D); + mov_rr(code, options->gen.scratch2, RDX, SZ_PTR); + push_r(code, options->gen.context_reg); + call(code, (code_ptr)z80_retranslate_inst); + pop_r(code, options->gen.context_reg); + mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); + call(code, options->gen.load_context); jmp_r(code, options->gen.scratch1); + + options->run = (z80_run_fun)code->cur; + //save callee save registers + push_r(code, RBX); + push_r(code, RBP); + push_r(code, R12); + push_r(code, R13); + push_r(code, R14); + push_r(code, R15); + mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); + cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); + code_ptr no_extra = code->cur+1; + jcc(code, CC_Z, no_extra); + push_rdisp(code, options->gen.context_reg, offsetof(z80_context, extra_pc)); + mov_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); + 
*no_extra = code->cur - (no_extra + 1); + jmp_rind(code, options->gen.context_reg); } void * z80_gen_bank_write(uint32_t start_address, void * voptions) @@ -2086,6 +2156,7 @@ void init_z80_context(z80_context * context, z80_options * options) context->banked_code_map = malloc(sizeof(native_map_slot) * (1 << 9)); memset(context->banked_code_map, 0, sizeof(native_map_slot) * (1 << 9)); context->options = options; + context->run = options->run; } void z80_reset(z80_context * context) @@ -2115,14 +2186,14 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha code->cur = bp_stub; //Save context and call breakpoint handler - call(code, (uint8_t *)z80_save_context); + call(code, opts->gen.save_context); push_r(code, opts->gen.scratch1); mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); mov_rr(code, opts->gen.scratch1, RSI, SZ_W); call(code, bp_handler); mov_rr(code, RAX, opts->gen.context_reg, SZ_Q); //Restore context - call(code, (uint8_t *)z80_load_context); + call(code, opts->gen.load_context); pop_r(code, opts->gen.scratch1); //do prologue stuff cmp_rr(code, opts->gen.cycles, opts->gen.limit, SZ_D); @@ -2131,7 +2202,7 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha pop_r(code, opts->gen.scratch1); add_ir(code, check_int_size - (code->cur-native), opts->gen.scratch1, SZ_Q); push_r(code, opts->gen.scratch1); - jmp(code, (uint8_t *)z80_handle_cycle_limit_int); + jmp(code, opts->gen.handle_cycle_limit_int); *jmp_off = code->cur - (jmp_off+1); //jump back to body of translated instruction pop_r(code, opts->gen.scratch1); -- cgit v1.2.3 From 55471707f6478f63fc07b2d260f6755ca259b6a2 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 26 Dec 2014 12:34:41 -0800 Subject: Add in missing generated Z80 helper functions. Fix a small bug in Z80_HALT. 
Fix generation of save and load context for Z80 --- z80_to_x86.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 7 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index b556b57..c402ad5 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -870,7 +870,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D); cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); code_ptr skip_last = code->cur+1; - jcc(code, CC_B, opts->gen.handle_cycle_limit_int); + jcc(code, CC_NB, code->cur+2); cycles(&opts->gen, 4); *skip_last = code->cur - (skip_last+1); call(code, opts->gen.handle_cycle_limit_int); @@ -1953,10 +1953,8 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 int reg; uint8_t size; if (i < Z80_I) { - reg = i /2 + Z80_BC; + reg = i /2 + Z80_BC + (i > Z80_H ? 2 : 0); size = SZ_W; - i++; - } else { reg = i; size = SZ_B; @@ -1964,6 +1962,9 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 if (options->regs[reg] >= 0) { mov_rrdisp(code, options->regs[reg], options->gen.context_reg, offsetof(z80_context, regs) + i, size); } + if (size == SZ_W) { + i++; + } } if (options->regs[Z80_SP] >= 0) { mov_rrdisp(code, options->regs[Z80_SP], options->gen.context_reg, offsetof(z80_context, sp), SZ_W); @@ -1983,9 +1984,8 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 int reg; uint8_t size; if (i < Z80_I) { - int reg = i /2 + Z80_BC; + reg = i /2 + Z80_BC + (i > Z80_H ? 
2 : 0); size = SZ_W; - } else { reg = i; size = SZ_B; @@ -1993,6 +1993,9 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 if (options->regs[reg] >= 0) { mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, regs) + i, options->regs[reg], size); } + if (size == SZ_W) { + i++; + } } if (options->regs[Z80_SP] >= 0) { mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, sp), options->regs[Z80_SP], SZ_W); @@ -2035,8 +2038,10 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 *no_sync = code->cur - no_sync; //return to caller of z80_run retn(code); + + options->gen.handle_code_write = (code_ptr)z80_handle_code_write; - options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, NULL); + options->read_8 = gen_mem_fun(&options->gen, chunks, num_chunks, READ_8, &options->read_8_noinc); options->write_8 = gen_mem_fun(&options->gen, chunks, num_chunks, WRITE_8, &options->write_8_noinc); options->gen.handle_cycle_limit_int = code->cur; @@ -2105,6 +2110,55 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 check_cycles(&options->gen); cycles(&options->gen, 4); retn(code); + + options->read_16 = code->cur; + cycles(&options->gen, 3); + check_cycles(&options->gen); + //TODO: figure out how to handle the extra wait state for word reads to bank area + //may also need special handling to avoid too much stack depth when acces is blocked + push_r(code, options->gen.scratch1); + call(code, options->read_8_noinc); + mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B); + pop_r(code, options->gen.scratch1); + add_ir(code, 1, options->gen.scratch1, SZ_W); + cycles(&options->gen, 3); + check_cycles(&options->gen); + call(code, options->read_8_noinc); + shl_ir(code, 8, options->gen.scratch1, SZ_W); + mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B); + retn(code); + + options->write_16_highfirst = code->cur; + 
cycles(&options->gen, 3); + check_cycles(&options->gen); + push_r(code, options->gen.scratch2); + push_r(code, options->gen.scratch1); + add_ir(code, 1, options->gen.scratch2, SZ_W); + shr_ir(code, 8, options->gen.scratch1, SZ_W); + call(code, options->write_8_noinc); + pop_r(code, options->gen.scratch1); + pop_r(code, options->gen.scratch2); + cycles(&options->gen, 3); + check_cycles(&options->gen); + //TODO: Check if we can get away with TCO here + call(code, options->write_8_noinc); + retn(code); + + options->write_16_lowfirst = code->cur; + cycles(&options->gen, 3); + check_cycles(&options->gen); + push_r(code, options->gen.scratch2); + push_r(code, options->gen.scratch1); + call(code, options->write_8_noinc); + pop_r(code, options->gen.scratch1); + pop_r(code, options->gen.scratch2); + add_ir(code, 1, options->gen.scratch2, SZ_W); + shr_ir(code, 8, options->gen.scratch1, SZ_W); + cycles(&options->gen, 3); + check_cycles(&options->gen); + //TODO: Check if we can get away with TCO here + call(code, options->write_8_noinc); + retn(code); options->retrans_stub = code->cur; //pop return address @@ -2130,6 +2184,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 push_r(code, R14); push_r(code, R15); mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); + call(code, options->load_context_scratch); cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); code_ptr no_extra = code->cur+1; jcc(code, CC_Z, no_extra); -- cgit v1.2.3 From 669d5ebf9474cb7efcd6aee6a84c28b3910b9348 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 26 Dec 2014 12:52:13 -0800 Subject: Update code->cur before calling z80_get_address_trans in z80_retranslate_inst to avoid any newly translated instructions from being placed in the "buffer zone". 
Save the current value of the code_info struct for placing the final jmp instruction in the correct place --- z80_to_x86.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index c402ad5..fd30d56 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1779,6 +1779,7 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o } #endif if (orig_size != ZMAX_NATIVE_SIZE) { + check_alloc_code(code, ZMAX_NATIVE_SIZE); code_ptr start = code->cur; deferred_addr * orig_deferred = opts->gen.deferred; translate_z80inst(&instbuf, context, address); @@ -1802,10 +1803,11 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o z80_map_native_address(context, address, start, after-inst, ZMAX_NATIVE_SIZE); code_info tmp_code = {orig_start, orig_start + 16}; jmp(&tmp_code, start); + tmp_code = *code; + code->cur = start + ZMAX_NATIVE_SIZE; if (!z80_is_terminal(&instbuf)) { - jmp(code, z80_get_native_address_trans(context, address + after-inst)); + jmp(&tmp_code, z80_get_native_address_trans(context, address + after-inst)); } - code->cur = start + ZMAX_NATIVE_SIZE; z80_handle_deferred(context); return start; } else { -- cgit v1.2.3 From 48d4b66ac0a36c584d03e59062f58a9c4de9d33b Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 26 Dec 2014 12:56:53 -0800 Subject: Fix an off-by-one error in a branch destination in the generation of handle_cycle_limit for the Z80 --- z80_to_x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index fd30d56..1fcd24f 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2037,7 +2037,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 pop_r(code, R12); pop_r(code, RBP); pop_r(code, RBX); - *no_sync = code->cur - no_sync; + *no_sync = code->cur - (no_sync + 1); //return to caller of z80_run retn(code); -- cgit v1.2.3 From 
e8a9c14ec2e0b75eb1aef1c2a77936c6f7fd8326 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 26 Dec 2014 15:45:31 -0800 Subject: Fix a few bugs introduced in the Z80 core from the adjustments to fit with the code gen refactor --- z80_to_x86.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 1fcd24f..c785e8a 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1303,7 +1303,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1352,7 +1352,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1371,7 +1371,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1406,7 +1406,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1429,7 +1429,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); //fake address to force 
large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1450,7 +1450,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1498,7 +1498,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); } else { @@ -1582,7 +1582,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address) if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); //fake address to force large displacement - call_dst + 256; + call_dst = code->cur + 256; } jmp(code, call_dst); break; @@ -1815,10 +1815,12 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o code->cur = orig_start; code->last = orig_start + ZMAX_NATIVE_SIZE; translate_z80inst(&instbuf, context, address); + code_info tmp2 = *code; + *code = tmp_code; if (!z80_is_terminal(&instbuf)) { - jmp(code, z80_get_native_address_trans(context, address + after-inst)); + + jmp(&tmp2, z80_get_native_address_trans(context, address + after-inst)); } - *code = tmp_code; z80_handle_deferred(context); return orig_start; } @@ -1855,6 +1857,8 @@ void translate_z80_stream(z80_context * context, uint32_t address) jmp(&opts->gen.code, existing); break; } + //make sure prologue is in a contiguous chunk of code + check_code_prologue(&opts->gen.code); next = z80_decode(encoded, &inst); #ifdef DO_DEBUG_PRINT z80_disasm(&inst, disbuf, address); @@ -2228,12 +2232,12 @@ void zinsert_breakpoint(z80_context * context, uint16_t 
address, uint8_t * bp_ha { static uint8_t * bp_stub = NULL; z80_options * opts = context->options; - uint8_t * native = z80_get_native_address_trans(context, address); + code_ptr native = z80_get_native_address_trans(context, address); code_info tmp_code = {native, native+16}; mov_ir(&tmp_code, address, opts->gen.scratch1, SZ_W); if (!bp_stub) { code_info *code = &opts->gen.code; - //TODO: do an alloc check here to make sure the prologue length calc works + check_code_prologue(code); bp_stub = code->cur; call(&tmp_code, bp_stub); @@ -2257,13 +2261,13 @@ void zinsert_breakpoint(z80_context * context, uint16_t address, uint8_t * bp_ha uint8_t * jmp_off = code->cur+1; jcc(code, CC_NC, code->cur + 7); pop_r(code, opts->gen.scratch1); - add_ir(code, check_int_size - (code->cur-native), opts->gen.scratch1, SZ_Q); + add_ir(code, check_int_size - (tmp_code.cur-native), opts->gen.scratch1, SZ_Q); push_r(code, opts->gen.scratch1); jmp(code, opts->gen.handle_cycle_limit_int); *jmp_off = code->cur - (jmp_off+1); //jump back to body of translated instruction pop_r(code, opts->gen.scratch1); - add_ir(code, check_int_size - (code->cur-native), opts->gen.scratch1, SZ_Q); + add_ir(code, check_int_size - (tmp_code.cur-native), opts->gen.scratch1, SZ_Q); jmp_r(code, opts->gen.scratch1); } else { call(&tmp_code, bp_stub); -- cgit v1.2.3 From c61ca95add7b82aadef09aea8b4c48774e079069 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Mon, 29 Dec 2014 23:08:39 -0800 Subject: Fix handling of code writes for Z80 core. This seems to get things close to being back to where they were before the big refactor that broke the Z80 core. Some problems remain. Notably the sound driver in Sonic 2 is still quite broken. 
--- z80_to_x86.c | 1 + 1 file changed, 1 insertion(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index c785e8a..bb701a6 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1907,6 +1907,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 options->gen.bus_cycles = 3; options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers); options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags); + options->gen.ram_flags_shift = 7; options->flags = 0; options->regs[Z80_B] = BH; -- cgit v1.2.3 From ec4eed4f35910aa27ca353fceea38155806ef188 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 1 Jan 2015 14:36:55 -0800 Subject: Remove some of the hard coded assumptions about the memory map from the CPU cores --- z80_to_x86.c | 157 ++++++++++++++++++++--------------------------------------- 1 file changed, 54 insertions(+), 103 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 6da2321..dc38cae 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1331,7 +1331,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, num_cycles += 4; } cycles(&opts->gen, num_cycles); - if (inst->addr_mode != Z80_REG_INDIRECT && inst->immed < 0x4000) { + if (inst->addr_mode != Z80_REG_INDIRECT) { code_ptr call_dst = z80_get_native_address(context, inst->immed); if (!call_dst) { opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); @@ -1380,38 +1380,26 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, jcc(code, cond, code->cur+2); cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = inst->immed; - if (dest_addr < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, dest_addr); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - 
mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst = z80_get_native_address(context, dest_addr); + if (!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); *no_jump_off = code->cur - (no_jump_off+1); break; } case Z80_JR: { cycles(&opts->gen, 12);//T States: 4,3,5 uint16_t dest_addr = address + inst->immed + 2; - if (dest_addr < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, dest_addr); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst = z80_get_native_address(context, dest_addr); + if (!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); break; } case Z80_JRCC: { @@ -1434,66 +1422,49 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, jcc(code, cond, code->cur+2); cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; - if (dest_addr < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, dest_addr); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst = z80_get_native_address(context, dest_addr); + if 
(!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); *no_jump_off = code->cur - (no_jump_off+1); break; } - case Z80_DJNZ: + case Z80_DJNZ: { cycles(&opts->gen, 8);//T States: 5,3 sub_ir(code, 1, opts->regs[Z80_B], SZ_B); uint8_t *no_jump_off = code->cur+1; jcc(code, CC_Z, code->cur+2); cycles(&opts->gen, 5);//T States: 5 uint16_t dest_addr = address + inst->immed + 2; - if (dest_addr < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, dest_addr); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - mov_ir(code, dest_addr, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst = z80_get_native_address(context, dest_addr); + if (!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, dest_addr, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); *no_jump_off = code->cur - (no_jump_off+1); break; + } case Z80_CALL: { cycles(&opts->gen, 11);//T States: 4,3,4 sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); call(code, opts->write_16_highfirst);//T States: 3, 3 - if (inst->immed < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, inst->immed); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst 
= z80_get_native_address(context, inst->immed); + if (!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); break; } - case Z80_CALLCC: + case Z80_CALLCC: { cycles(&opts->gen, 10);//T States: 4,3,3 (false case) uint8_t cond = CC_Z; switch (inst->reg) @@ -1526,21 +1497,16 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, mov_ir(code, address + 3, opts->gen.scratch1, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); call(code, opts->write_16_highfirst);//T States: 3, 3 - if (inst->immed < 0x4000) { - code_ptr call_dst = z80_get_native_address(context, inst->immed); - if (!call_dst) { - opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); - //fake address to force large displacement - call_dst = code->cur + 256; - } - jmp(code, call_dst); - } else { - mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); - call(code, opts->native_addr); - jmp_r(code, opts->gen.scratch1); + code_ptr call_dst = z80_get_native_address(context, inst->immed); + if (!call_dst) { + opts->gen.deferred = defer_address(opts->gen.deferred, inst->immed, code->cur + 1); + //fake address to force large displacement + call_dst = code->cur + 256; } + jmp(code, call_dst); *no_call_off = code->cur - (no_call_off+1); break; + } case Z80_RET: cycles(&opts->gen, 4);//T States: 4 mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); @@ -1850,10 +1816,8 @@ void * z80_retranslate_inst(uint32_t address, z80_context * context, uint8_t * o char disbuf[80]; z80_options * opts = context->options; uint8_t orig_size = z80_get_native_inst_size(opts, address); - uint32_t orig = address; - address &= 0x1FFF; code_info *code = &opts->gen.code; - uint8_t *after, *inst = context->mem_pointers[0] + address; + uint8_t *after, *inst = get_native_pointer(address, (void **)context->mem_pointers, 
&opts->gen); z80inst instbuf; dprintf("Retranslating code at Z80 address %X, native address %p\n", address, orig_start); after = z80_decode(inst, &instbuf); @@ -1921,26 +1885,24 @@ void translate_z80_stream(z80_context * context, uint32_t address) } z80_options * opts = context->options; uint32_t start_address = address; - uint8_t * encoded = NULL, *next; - if (address < 0x4000) { - encoded = context->mem_pointers[0] + (address & 0x1FFF); - } - while (encoded != NULL || address >= 0x4000) + do { z80inst inst; dprintf("translating Z80 code at address %X\n", address); do { - if (address >= 0x4000) { - code_info stub = z80_make_interp_stub(context, address); - z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur); - break; - } uint8_t * existing = z80_get_native_address(context, address); if (existing) { jmp(&opts->gen.code, existing); break; } + uint8_t * encoded, *next; + encoded = get_native_pointer(address, (void **)context->mem_pointers, &opts->gen); + if (!encoded) { + code_info stub = z80_make_interp_stub(context, address); + z80_map_native_address(context, address, stub.cur, 1, stub.last - stub.cur); + break; + } //make sure prologue is in a contiguous chunk of code check_code_prologue(&opts->gen.code); next = z80_decode(encoded, &inst); @@ -1956,33 +1918,22 @@ void translate_z80_stream(z80_context * context, uint32_t address) translate_z80inst(&inst, context, address, 0); z80_map_native_address(context, address, start, next-encoded, opts->gen.code.cur - start); address += next-encoded; - if (address > 0xFFFF) { - address &= 0xFFFF; - - } else { - encoded = next; - } + address &= 0xFFFF; } while (!z80_is_terminal(&inst)); process_deferred(&opts->gen.deferred, context, (native_addr_func)z80_get_native_address); if (opts->gen.deferred) { address = opts->gen.deferred->address; dprintf("defferred address: %X\n", address); - if (address < 0x4000) { - encoded = context->mem_pointers[0] + (address & 0x1FFF); - } else { - encoded = NULL; - } - } 
else { - encoded = NULL; - address = 0; } - } + } while (opts->gen.deferred); } void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) { memset(options, 0, sizeof(*options)); + options->gen.memmap = chunks; + options->gen.memmap_chunks = num_chunks; options->gen.address_size = SZ_W; options->gen.address_mask = 0xFFFF; options->gen.max_address = 0x10000; -- cgit v1.2.3 From 9e0779e64b71a60ee9287ed01330baf020cfd932 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 1 Jan 2015 20:07:47 -0800 Subject: Use call_args and call_args_abi in Z80 core --- z80_to_x86.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index dc38cae..1912b89 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -875,8 +875,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, case Z80_NOP: if (inst->immed == 42) { call(code, opts->gen.save_context); - mov_rr(code, opts->gen.context_reg, RDI, SZ_Q); - jmp(code, (uint8_t *)z80_print_regs_exit); + call_args(code, (code_ptr)z80_print_regs_exit, 1, opts->gen.context_reg); } else { cycles(&opts->gen, 4 * inst->immed); } @@ -1675,10 +1674,9 @@ code_info z80_make_interp_stub(z80_context * context, uint16_t address) cycles(&opts->gen, -3); check_cycles_int(&opts->gen, address); call(code, opts->gen.save_context); - mov_rr(code, opts->gen.scratch1, RDI, SZ_B); mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W); push_r(code, opts->gen.context_reg); - call(code, (code_ptr)z80_interp_handler); + call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.scratch2); mov_rr(code, RAX, opts->gen.scratch1, SZ_Q); pop_r(code, opts->gen.context_reg); call(code, opts->gen.load_context); @@ -2049,9 +2047,8 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 options->native_addr = code->cur; call(code, 
options->gen.save_context); push_r(code, options->gen.context_reg); - mov_rr(code, options->gen.context_reg, RDI, SZ_PTR); - movzx_rr(code, options->gen.scratch1, RSI, SZ_W, SZ_D); - call(code, (code_ptr)z80_get_native_address_trans); + movzx_rr(code, options->gen.scratch1, options->gen.scratch1, SZ_W, SZ_D); + call_args(code, (code_ptr)z80_get_native_address_trans, 2, options->gen.context_reg, options->gen.scratch1); mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); pop_r(code, options->gen.context_reg); call(code, options->gen.load_context); @@ -2155,7 +2152,7 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 cycles(&options->gen, 3); check_cycles(&options->gen); //TODO: figure out how to handle the extra wait state for word reads to bank area - //may also need special handling to avoid too much stack depth when acces is blocked + //may also need special handling to avoid too much stack depth when access is blocked push_r(code, options->gen.scratch1); call(code, options->read_8_noinc); mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B); @@ -2206,10 +2203,8 @@ void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint3 call(code, options->gen.save_context); //adjust pointer before move and call instructions that got us here sub_ir(code, 11, options->gen.scratch2, SZ_PTR); - mov_rr(code, options->gen.scratch1, RDI, SZ_D); - mov_rr(code, options->gen.scratch2, RDX, SZ_PTR); push_r(code, options->gen.context_reg); - call(code, (code_ptr)z80_retranslate_inst); + call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2); pop_r(code, options->gen.context_reg); mov_rr(code, RAX, options->gen.scratch1, SZ_PTR); call(code, options->gen.load_context); @@ -2291,9 +2286,7 @@ void zcreate_stub(z80_context * context) //Save context and call breakpoint handler call(code, opts->gen.save_context); push_r(code, opts->gen.scratch1); - mov_rr(code, 
opts->gen.context_reg, RDI, SZ_Q); - mov_rr(code, opts->gen.scratch1, RSI, SZ_W); - call(code, context->bp_handler); + call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1); mov_rr(code, RAX, opts->gen.context_reg, SZ_Q); //Restore context call(code, opts->gen.load_context); -- cgit v1.2.3 From d41ae43228509a1a67446492b844013cf1e68c36 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 1 Jan 2015 20:26:22 -0800 Subject: Minor Z80 core cleanup --- z80_to_x86.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 1912b89..a30adde 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -14,12 +14,6 @@ #define MODE_UNUSED (MODE_IMMED-1) -#define ZCYCLES RBP -#define ZLIMIT RDI -#define SCRATCH1 R13 -#define SCRATCH2 R14 -#define CONTEXT RSI - //#define DO_DEBUG_PRINT #ifdef DO_DEBUG_PRINT @@ -138,7 +132,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t } } else { ea->mode = MODE_REG_DISPLACE8; - ea->base = CONTEXT; + ea->base = opts->gen.context_reg; ea->disp = offsetof(z80_context, regs) + inst->ea_reg; } break; @@ -368,7 +362,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B);; - mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), SCRATCH1, SZ_B); + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, iff2), opts->gen.scratch1, SZ_B); mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zf_off(ZF_PV), SZ_B); } z80_save_reg(inst, opts); @@ -1926,7 +1920,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) } while (opts->gen.deferred); } -void init_x86_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) +void 
init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) { memset(options, 0, sizeof(*options)); @@ -2263,7 +2257,7 @@ void z80_reset(z80_context * context) uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst) { code_info code = {dst, dst+16}; - mov_ir(&code, address, SCRATCH1, SZ_W); + mov_ir(&code, address, context->options->gen.scratch1, SZ_W); call(&code, context->bp_stub); return code.cur-dst; } -- cgit v1.2.3 From 758586c1b7feec3c4fa3761ed17d1a6ea3cbab00 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 1 Jan 2015 22:18:32 -0800 Subject: Add the 3 cycle delay back in to Z80 bank area access --- z80_to_x86.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index a30adde..5f028b6 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2223,13 +2223,6 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t jmp_rind(code, options->gen.context_reg); } -void * z80_gen_bank_write(uint32_t start_address, void * voptions) -{ - z80_options * options = voptions; - //TODO: Handle writes to bank register - return options; -} - void init_z80_context(z80_context * context, z80_options * options) { memset(context, 0, sizeof(*context)); -- cgit v1.2.3 From 32c7399651b886128f9cbd7d185684f84d392a1a Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Thu, 1 Jan 2015 23:37:24 -0800 Subject: Remove dedicated registers for Z80 bank reg and Z80 bank pointer as they are no longer used --- z80_to_x86.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 5f028b6..c1869b9 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1956,9 +1956,6 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t options->regs[Z80_IX] = RDX; options->regs[Z80_IY] = R8; - options->bank_reg = R15; - options->bank_pointer = R12; - options->gen.context_reg = RSI; 
options->gen.cycles = RBP; options->gen.limit = RDI; @@ -2003,8 +2000,6 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t } mov_rrdisp(code, options->gen.limit, options->gen.context_reg, offsetof(z80_context, target_cycle), SZ_D); mov_rrdisp(code, options->gen.cycles, options->gen.context_reg, offsetof(z80_context, current_cycle), SZ_D); - mov_rrdisp(code, options->bank_reg, options->gen.context_reg, offsetof(z80_context, bank_reg), SZ_W); - mov_rrdisp(code, options->bank_pointer, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, SZ_PTR); retn(code); options->load_context_scratch = code->cur; @@ -2034,8 +2029,6 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t } mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, target_cycle), options->gen.limit, SZ_D); mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, current_cycle), options->gen.cycles, SZ_D); - mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, bank_reg), options->bank_reg, SZ_W); - mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, mem_pointers) + sizeof(uint8_t *) * 1, options->bank_pointer, SZ_PTR); retn(code); options->native_addr = code->cur; -- cgit v1.2.3 From 7d7892a23668855bb93b979e34a1d89440eb6e7e Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 2 Jan 2015 00:19:10 -0800 Subject: Sync Z80 when taking an interrupt so that int_cycle gets updated --- z80_to_x86.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index c1869b9..f13ef16 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2103,7 +2103,16 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t //TODO: Support interrupt mode 0 and 2 mov_ir(code, 0x38, options->gen.scratch1, SZ_W); call(code, options->native_addr); - jmp_r(code, options->gen.scratch1); + mov_rrind(code, 
options->gen.scratch1, options->gen.context_reg, SZ_PTR); + //restore callee saved registers + pop_r(code, R15); + pop_r(code, R14); + pop_r(code, R13); + pop_r(code, R12); + pop_r(code, RBP); + pop_r(code, RBX); + //return to caller of z80_run to sync + retn(code); *skip_int = code->cur - (skip_int+1); cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); code_ptr skip_sync = code->cur + 1; -- cgit v1.2.3 From f92dd42cd257aba22ce4076d1cab2876babe3471 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 2 Jan 2015 12:04:58 -0800 Subject: Use SZ_PTR instead of SZ_Q in Z80 core for 32-bit compat --- z80_to_x86.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index f13ef16..83237ca 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1671,7 +1671,7 @@ code_info z80_make_interp_stub(z80_context * context, uint16_t address) mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W); push_r(code, opts->gen.context_reg); call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.scratch2); - mov_rr(code, RAX, opts->gen.scratch1, SZ_Q); + mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); pop_r(code, opts->gen.context_reg); call(code, opts->gen.load_context); jmp_r(code, opts->gen.scratch1); @@ -2276,7 +2276,7 @@ void zcreate_stub(z80_context * context) call(code, opts->gen.save_context); push_r(code, opts->gen.scratch1); call_args_abi(code, context->bp_handler, 2, opts->gen.context_reg, opts->gen.scratch1); - mov_rr(code, RAX, opts->gen.context_reg, SZ_Q); + mov_rr(code, RAX, opts->gen.context_reg, SZ_PTR); //Restore context call(code, opts->gen.load_context); pop_r(code, opts->gen.scratch1); @@ -2285,13 +2285,13 @@ void zcreate_stub(z80_context * context) uint8_t * jmp_off = code->cur+1; jcc(code, CC_NC, code->cur + 7); pop_r(code, opts->gen.scratch1); - add_ir(code, check_int_size - patch_size, 
opts->gen.scratch1, SZ_Q); + add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR); push_r(code, opts->gen.scratch1); jmp(code, opts->gen.handle_cycle_limit_int); *jmp_off = code->cur - (jmp_off+1); //jump back to body of translated instruction pop_r(code, opts->gen.scratch1); - add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_Q); + add_ir(code, check_int_size - patch_size, opts->gen.scratch1, SZ_PTR); jmp_r(code, opts->gen.scratch1); } -- cgit v1.2.3 From af8bf7f7f18861ef1235e85a72ca100e755d9859 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 2 Jan 2015 13:14:09 -0800 Subject: Added functions to gen_x86 for saving and restoring callee save registers to better abstract over ABI differences between x86 and x86-64 --- z80_to_x86.c | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 83237ca..aac1ceb 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2052,13 +2052,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t sub_ir(code, 5, RAX, SZ_PTR); //adjust return address to point to the call that got us here mov_rrdisp(code, RBX, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); mov_rrind(code, RAX, options->gen.context_reg, SZ_PTR); - //restore callee saved registers - pop_r(code, R15); - pop_r(code, R14); - pop_r(code, R13); - pop_r(code, R12); - pop_r(code, RBP); - pop_r(code, RBX); + restore_callee_save_regs(code); *no_sync = code->cur - (no_sync + 1); //return to caller of z80_run retn(code); @@ -2104,13 +2098,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t mov_ir(code, 0x38, options->gen.scratch1, SZ_W); call(code, options->native_addr); mov_rrind(code, options->gen.scratch1, options->gen.context_reg, SZ_PTR); - //restore callee saved registers - pop_r(code, R15); - pop_r(code, R14); - pop_r(code, R13); - pop_r(code, R12); - 
pop_r(code, RBP); - pop_r(code, RBX); + restore_callee_save_regs(code); //return to caller of z80_run to sync retn(code); *skip_int = code->cur - (skip_int+1); @@ -2121,12 +2109,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t call(code, options->gen.save_context); pop_rind(code, options->gen.context_reg); //restore callee saved registers - pop_r(code, R15); - pop_r(code, R14); - pop_r(code, R13); - pop_r(code, R12); - pop_r(code, RBP); - pop_r(code, RBX); + restore_callee_save_regs(code); //return to caller of z80_run *skip_sync = code->cur - (skip_sync+1); retn(code); @@ -2207,13 +2190,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t jmp_r(code, options->gen.scratch1); options->run = (z80_run_fun)code->cur; - //save callee save registers - push_r(code, RBX); - push_r(code, RBP); - push_r(code, R12); - push_r(code, R13); - push_r(code, R14); - push_r(code, R15); + save_callee_save_regs(code); mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); call(code, options->load_context_scratch); cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); -- cgit v1.2.3 From 172a8961d9ebb577668dcb150c56b3f2c6da0419 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Fri, 2 Jan 2015 13:47:34 -0800 Subject: In theory, the Z80 core should work on 32-bit builds now; however, I suspect there is some code that cannot deal with most of the Z80 registers not having a native register so more work will be needed --- z80_to_x86.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index aac1ceb..15e42b5 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -124,11 +124,13 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t ea->base = opts->regs[inst->ea_reg]; if (ea->base >= AH && ea->base <= BH && inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED) { uint8_t 
other_reg = opts->regs[inst->reg]; +#ifdef X86_64 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix ea->base = opts->regs[z80_low_reg(inst->ea_reg)]; ror_ir(code, 8, ea->base, SZ_W); } +#endif } } else { ea->mode = MODE_REG_DISPLACE8; @@ -224,10 +226,12 @@ void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts) } } else if (inst->reg != Z80_UNUSED && inst->reg != Z80_USE_IMMED && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { uint8_t other_reg = opts->regs[inst->reg]; +#ifdef X86_64 if (other_reg >= R8 || (other_reg >= RSP && other_reg <= RDI)) { //we can't mix an *H reg with a register that requires the REX prefix ror_ir(code, 8, opts->regs[z80_low_reg(inst->ea_reg)], SZ_W); } +#endif } } } @@ -1255,13 +1259,17 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, bts_ir(code, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { +#ifdef X86_64 if (dst_op.base >= R8) { ror_ir(code, 8, src_op.base, SZ_W); mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); ror_ir(code, 8, src_op.base, SZ_W); } else { +#endif mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); +#ifdef X86_64 } +#endif } else { mov_rr(code, src_op.base, dst_op.base, SZ_B); } @@ -1297,13 +1305,17 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, btr_ir(code, bit, src_op.base, size); if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { +#ifdef X86_64 if (dst_op.base >= R8) { ror_ir(code, 8, src_op.base, SZ_W); mov_rr(code, opts->regs[z80_low_reg(inst->ea_reg)], dst_op.base, SZ_B); ror_ir(code, 8, src_op.base, SZ_W); } else { +#endif mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); +#ifdef X86_64 } +#endif } else { mov_rr(code, src_op.base, dst_op.base, SZ_B); } @@ -1935,6 +1947,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t 
options->gen.ram_flags_shift = 7; options->flags = 0; +#ifdef X86_64 options->regs[Z80_B] = BH; options->regs[Z80_C] = RBX; options->regs[Z80_D] = CH; @@ -1955,12 +1968,21 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t options->regs[Z80_AF] = -1; options->regs[Z80_IX] = RDX; options->regs[Z80_IY] = R8; - + + options->gen.scratch1 = R13; + options->gen.scratch2 = R14; +#else + memset(options->regs, -1, sizeof(options->regs)); + options->regs[Z80_A] = RAX; + options->regx[Z80_SP] = RBX; + + options->gen.scratch1 = RCX; + options->gen.scratch2 = RDX; +#endif + options->gen.context_reg = RSI; options->gen.cycles = RBP; options->gen.limit = RDI; - options->gen.scratch1 = R13; - options->gen.scratch2 = R14; options->gen.native_code_map = malloc(sizeof(native_map_slot)); memset(options->gen.native_code_map, 0, sizeof(native_map_slot)); -- cgit v1.2.3 From 8ac1e753e1af481b2090a4c1b7395853f30b5e8f Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 3 Jan 2015 16:08:23 -0800 Subject: All cycle counters are now based off the master clock. 
This seems to have messed up Z80 interrupt timing (music in Sonic 2 is too slow for instance), but things are generally working --- z80_to_x86.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 15e42b5..13f952a 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -879,20 +879,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } break; case Z80_HALT: { + code_ptr loop_top = code->cur; + //this isn't terribly efficient, but it's good enough for now cycles(&opts->gen, 4); - mov_ir(code, address, opts->gen.scratch1, SZ_W); - uint8_t * call_inst = code->cur; - mov_rr(code, opts->gen.limit, opts->gen.scratch2, SZ_D); - sub_rr(code, opts->gen.cycles, opts->gen.scratch2, SZ_D); - and_ir(code, 0xFFFFFFFC, opts->gen.scratch2, SZ_D); - add_rr(code, opts->gen.scratch2, opts->gen.cycles, SZ_D); - cmp_rr(code, opts->gen.limit, opts->gen.cycles, SZ_D); - code_ptr skip_last = code->cur+1; - jcc(code, CC_NB, code->cur+2); - cycles(&opts->gen, 4); - *skip_last = code->cur - (skip_last+1); - call(code, opts->gen.handle_cycle_limit_int); - jmp(code, call_inst); + check_cycles_int(&opts->gen, address); + jmp(code, loop_top); break; } case Z80_DI: @@ -908,7 +899,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff1), SZ_B); mov_irdisp(code, 1, opts->gen.context_reg, offsetof(z80_context, iff2), SZ_B); //interrupt enable has a one-instruction latency, minimum instruction duration is 4 cycles - add_irdisp(code, 4, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); + add_irdisp(code, 4*opts->gen.clock_divider, opts->gen.context_reg, offsetof(z80_context, int_enable_cycle), SZ_D); call(code, opts->do_sync); break; case Z80_IM: @@ -1932,7 +1923,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) } while 
(opts->gen.deferred); } -void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks) +void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t num_chunks, uint32_t clock_divider) { memset(options, 0, sizeof(*options)); @@ -1942,6 +1933,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t options->gen.address_mask = 0xFFFF; options->gen.max_address = 0x10000; options->gen.bus_cycles = 3; + options->gen.clock_divider = clock_divider; options->gen.mem_ptr_off = offsetof(z80_context, mem_pointers); options->gen.ram_flags_off = offsetof(z80_context, ram_code_flags); options->gen.ram_flags_shift = 7; @@ -1968,18 +1960,18 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t options->regs[Z80_AF] = -1; options->regs[Z80_IX] = RDX; options->regs[Z80_IY] = R8; - + options->gen.scratch1 = R13; options->gen.scratch2 = R14; #else memset(options->regs, -1, sizeof(options->regs)); options->regs[Z80_A] = RAX; options->regx[Z80_SP] = RBX; - + options->gen.scratch1 = RCX; options->gen.scratch2 = RDX; #endif - + options->gen.context_reg = RSI; options->gen.cycles = RBP; options->gen.limit = RDI; -- cgit v1.2.3 From e6216d5d0a9f8cdb700124558d38cd47e63fbdda Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 3 Jan 2015 18:23:04 -0800 Subject: Made the Z80 core more contained by refactoring some code in blastem.c into z80_to_x86.c --- z80_to_x86.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 104 insertions(+), 10 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 13f952a..9ae1a7c 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1777,7 +1777,7 @@ z80_context * z80_handle_code_write(uint32_t address, z80_context * context) code_ptr dst = z80_get_native_address(context, inst_start); code_info code = {dst, dst+16}; z80_options * opts = context->options; - dprintf("patching code at %p for Z80 
instruction at %X due to write to %X\n", code, inst_start, address); + dprintf("patching code at %p for Z80 instruction at %X due to write to %X\n", code.cur, inst_start, address); mov_ir(&code, inst_start, opts->gen.scratch1, SZ_D); call(&code, opts->retrans_stub); } @@ -2226,18 +2226,112 @@ void init_z80_context(z80_context * context, z80_options * options) context->banked_code_map = malloc(sizeof(native_map_slot)); memset(context->banked_code_map, 0, sizeof(native_map_slot)); context->options = options; - context->int_cycle = 0xFFFFFFFF; - context->int_pulse_start = 0xFFFFFFFF; - context->int_pulse_end = 0xFFFFFFFF; - context->run = options->run; + context->int_cycle = CYCLE_NEVER; + context->int_pulse_start = CYCLE_NEVER; + context->int_pulse_end = CYCLE_NEVER; } -void z80_reset(z80_context * context) +void z80_run(z80_context * context, uint32_t target_cycle) { - context->im = 0; - context->iff1 = context->iff2 = 0; - context->native_pc = z80_get_native_address_trans(context, 0); - context->extra_pc = NULL; + if (context->reset || context->busack) { + context->current_cycle = target_cycle; + } else { + if (context->current_cycle < target_cycle) { + //busreq is sampled at the end of an m-cycle + //we can approximate that by running for a single m-cycle after a bus request + context->sync_cycle = context->busreq ? context->current_cycle + 3*context->options->gen.clock_divider : target_cycle; + if (!context->native_pc) { + context->native_pc = z80_get_native_address_trans(context, context->pc); + } + while (context->current_cycle < context->sync_cycle) + { + if (context->int_pulse_end < context->current_cycle || context->int_pulse_end == CYCLE_NEVER) { + z80_next_int_pulse(context); + } + if (context->iff1) { + context->int_cycle = context->int_pulse_start < context->int_enable_cycle ? context->int_enable_cycle : context->int_pulse_start; + } else { + context->int_cycle = CYCLE_NEVER; + } + context->target_cycle = context->sync_cycle < context->int_cycle ? 
context->sync_cycle : context->int_cycle; + dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d\n", context->current_cycle, context->sync_cycle, context->int_cycle); + context->options->run(context); + dprintf("Z80 ran to cycle %d\n", context->current_cycle); + } + if (context->busreq) { + context->busack = 1; + context->current_cycle = target_cycle; + } + } + } +} + +void z80_assert_reset(z80_context * context, uint32_t cycle) +{ + z80_run(context, cycle); + context->reset = 1; +} + +void z80_clear_reset(z80_context * context, uint32_t cycle) +{ + z80_run(context, cycle); + if (context->reset) { + //TODO: Handle case where reset is not asserted long enough + context->im = 0; + context->iff1 = context->iff2 = 0; + context->native_pc = NULL; + context->extra_pc = NULL; + context->pc = 0; + context->reset = 0; + } +} + +void z80_assert_busreq(z80_context * context, uint32_t cycle) +{ + z80_run(context, cycle); + context->busreq = 1; +} + +void z80_clear_busreq(z80_context * context, uint32_t cycle) +{ + z80_run(context, cycle); + context->busreq = 0; + context->busack = 0; +} + +uint8_t z80_get_busack(z80_context * context, uint32_t cycle) +{ + z80_run(context, cycle); + return context->busack; +} + +void z80_adjust_cycles(z80_context * context, uint32_t deduction) +{ + if (context->current_cycle < deduction) { + fprintf(stderr, "WARNING: Deduction of %u cycles when Z80 cycle counter is only %u\n", deduction, context->current_cycle); + context->current_cycle = 0; + } else { + context->current_cycle -= deduction; + } + if (context->int_enable_cycle != CYCLE_NEVER) { + if (context->int_enable_cycle < deduction) { + context->int_enable_cycle = 0; + } else { + context->int_enable_cycle -= deduction; + } + } + if (context->int_pulse_start != CYCLE_NEVER) { + if (context->int_pulse_end < deduction) { + context->int_pulse_start = context->int_pulse_end = CYCLE_NEVER; + } else { + context->int_pulse_end -= deduction; + if (context->int_pulse_start < deduction) { + 
context->int_pulse_start = 0; + } else { + context->int_pulse_start -= deduction; + } + } + } } uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst) -- cgit v1.2.3 From d57873760390a393e784d1339b1d64564fe83234 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 3 Jan 2015 18:49:07 -0800 Subject: Restore Z80 interrupt pulse duration and make a small improvement to debug print output --- z80_to_x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 9ae1a7c..68f3b4f 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2254,7 +2254,7 @@ void z80_run(z80_context * context, uint32_t target_cycle) context->int_cycle = CYCLE_NEVER; } context->target_cycle = context->sync_cycle < context->int_cycle ? context->sync_cycle : context->int_cycle; - dprintf("Running Z80 from cycle %d to cycle %d. Int cycle: %d\n", context->current_cycle, context->sync_cycle, context->int_cycle); + dprintf("Running Z80 from cycle %d to cycle %d. 
Int cycle: %d (%d - %d)\n", context->current_cycle, context->sync_cycle, context->int_cycle, context->int_pulse_start, context->int_pulse_end); context->options->run(context); dprintf("Z80 ran to cycle %d\n", context->current_cycle); } -- cgit v1.2.3 From 78a4b6f68e5e53c42fdd62c0269fe5862ce73a32 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 3 Jan 2015 21:35:23 -0800 Subject: When going directly from reset to busreq, do not allow the Z80 to run --- z80_to_x86.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 68f3b4f..13716ff 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2283,6 +2283,10 @@ void z80_clear_reset(z80_context * context, uint32_t cycle) context->extra_pc = NULL; context->pc = 0; context->reset = 0; + if (context->busreq) { + //TODO: Figure out appropriate delay + context->busack = 1; + } } } -- cgit v1.2.3 From b3e40bd29176e1c4cf53f8542628e48e1eb5dcb0 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Mon, 11 May 2015 00:28:47 -0700 Subject: Sync fixes and logging to fix more sync issues --- z80_to_x86.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index fab31da..6b498f5 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -309,6 +309,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { zbreakpoint_patch(context, address, start); } + //log_address(&opts->gen, address, "Z80: %X @ %d\n"); } switch(inst->op) { @@ -2293,7 +2294,9 @@ void z80_clear_reset(z80_context * context, uint32_t cycle) void z80_assert_busreq(z80_context * context, uint32_t cycle) { + printf("bus requested at %d\n", cycle); z80_run(context, cycle); + printf("asserted busreq at %d\n", context->current_cycle); context->busreq = 1; } -- cgit v1.2.3 From 9eadca47ed6f00e7a00ac25b6b85ba4426104950 Mon Sep 17 00:00:00 2001 From: 
Michael Pavone Date: Mon, 11 May 2015 20:34:33 -0700 Subject: Remove/comment verbose logging added for tracking down sync bug --- z80_to_x86.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 6b498f5..17eb40a 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2279,10 +2279,10 @@ void z80_clear_reset(z80_context * context, uint32_t cycle) z80_run(context, cycle); if (context->reset) { //TODO: Handle case where reset is not asserted long enough - context->im = 0; - context->iff1 = context->iff2 = 0; + context->im = 0; + context->iff1 = context->iff2 = 0; context->native_pc = NULL; - context->extra_pc = NULL; + context->extra_pc = NULL; context->pc = 0; context->reset = 0; if (context->busreq) { @@ -2294,11 +2294,9 @@ void z80_clear_reset(z80_context * context, uint32_t cycle) void z80_assert_busreq(z80_context * context, uint32_t cycle) { - printf("bus requested at %d\n", cycle); z80_run(context, cycle); - printf("asserted busreq at %d\n", context->current_cycle); context->busreq = 1; - } +} void z80_clear_busreq(z80_context * context, uint32_t cycle) { -- cgit v1.2.3 From 42c1cb8c7c5369c88a8e94c083731b1e93b6119a Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Tue, 12 May 2015 19:14:09 -0700 Subject: Save PC to context struct when syncing Z80 at instruction start. 
This fixes saving savestates and probably the Z80 debugger as well --- z80_to_x86.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 17eb40a..313a8bb 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -2121,6 +2121,8 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t cmp_rdispr(code, options->gen.context_reg, offsetof(z80_context, sync_cycle), options->gen.cycles, SZ_D); code_ptr skip_sync = code->cur + 1; jcc(code, CC_B, skip_sync); + //save PC + mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, pc), SZ_D); options->do_sync = code->cur; call(code, options->gen.save_context); pop_rind(code, options->gen.context_reg); -- cgit v1.2.3 From fabfa5cb07a4d407146f22afdb5349cdde8016b2 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 17 May 2015 15:40:31 -0700 Subject: Fix crash bug in Z80 interpreter --- z80_to_x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 313a8bb..1f8d477 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1674,7 +1674,7 @@ code_info z80_make_interp_stub(z80_context * context, uint16_t address) call(code, opts->gen.save_context); mov_irdisp(code, address, opts->gen.context_reg, offsetof(z80_context, pc), SZ_W); push_r(code, opts->gen.context_reg); - call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.scratch2); + call_args(code, (code_ptr)z80_interp_handler, 2, opts->gen.scratch1, opts->gen.context_reg); mov_rr(code, RAX, opts->gen.scratch1, SZ_PTR); pop_r(code, opts->gen.context_reg); call(code, opts->gen.load_context); -- cgit v1.2.3 From e51040f08071327061219caa1c54d3dcb346b1c7 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 17 May 2015 19:14:41 -0700 Subject: Call z80_handle_deferred after generating an insruction handler so that instructions like rst work correctly --- z80_to_x86.c | 2 ++ 1 
file changed, 2 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 1f8d477..bef749f 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -23,6 +23,7 @@ #endif uint32_t zbreakpoint_patch(z80_context * context, uint16_t address, code_ptr dst); +void z80_handle_deferred(z80_context * context); uint8_t z80_size(z80inst * inst) { @@ -1653,6 +1654,7 @@ uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) add_ir(code, after - codebuf, opts->gen.scratch1, SZ_W); call(code, opts->native_addr); jmp_r(code, opts->gen.scratch1); + z80_handle_deferred(context); } return context->interp_code[opcode]; } -- cgit v1.2.3 From 9c11d06c90e3d1f8e1c7bfbd5478b5852fcae876 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 17 May 2015 20:03:27 -0700 Subject: Make sure z80_save_reg does nothing when there is no register in the reg field of the instruction. This fixes a bug that corrupted SP in the MDEM 2011 demo --- z80_to_x86.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index bef749f..6d33c14 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -81,6 +81,9 @@ void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) void z80_save_reg(z80inst * inst, z80_options * opts) { code_info *code = &opts->gen.code; + if (inst->reg == Z80_USE_IMMED || inst->reg == Z80_UNUSED) { + return; + } if (inst->reg == Z80_IYH) { if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); -- cgit v1.2.3 From e010a3570b866e225a269f1e2fc2d0dbb3b26ddc Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 23 May 2015 20:26:20 -0700 Subject: Fix a bunch of assumptions about which Z80 registers are stored in native registers to make the x86-32 build less broken --- z80_to_x86.c | 213 +++++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 162 insertions(+), 51 deletions(-) (limited to 'z80_to_x86.c') diff --git 
a/z80_to_x86.c b/z80_to_x86.c index 6d33c14..a005369 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -115,7 +115,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t switch(inst->addr_mode & 0x1F) { case Z80_REG: - if (inst->ea_reg == Z80_IYH) { + if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IY] >= 0) { if (inst->reg == Z80_IYL) { mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); ror_ir(code, 8, opts->gen.scratch1, SZ_W); @@ -143,7 +143,11 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t } break; case Z80_REG_INDIRECT: - mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); + if (opts->regs[inst->ea_reg] >= 0) { + mov_rr(code, opts->regs[inst->ea_reg], areg, SZ_W); + } else { + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + z80_low_reg(inst->ea_reg), areg, SZ_W); + } size = z80_size(inst); if (read) { if (modify) { @@ -188,7 +192,11 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; - mov_rr(code, reg, areg, SZ_W); + if (reg >= 0) { + mov_rr(code, reg, areg, SZ_W); + } else { + mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IXL : Z80_IYL, areg, SZ_W); + } add_ir(code, inst->ea_reg & 0x80 ? 
inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); size = z80_size(inst); if (read) { @@ -276,11 +284,40 @@ uint8_t zaf_off(uint8_t flag) return offsetof(z80_context, alt_flags) + flag; } +uint8_t zr_off(uint8_t reg) +{ + if (reg > Z80_A) { + reg = z80_low_reg(reg); + } + return offsetof(z80_context, regs) + reg; +} + uint8_t zar_off(uint8_t reg) { + if (reg > Z80_A) { + reg = z80_low_reg(reg); + } return offsetof(z80_context, alt_regs) + reg; } +void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg) +{ + if (opts->regs[reg] >= 0) { + mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B); + } else { + mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B); + } +} + +void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg) +{ + if (opts->regs[reg] >= 0) { + mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B); + } else { + mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? SZ_W : SZ_B); + } +} + void z80_print_regs_exit(z80_context * context) { printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n", @@ -384,7 +421,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, cycles(&opts->gen, (inst->reg == Z80_IX || inst->reg == Z80_IY) ? 
9 : 5); sub_ir(code, 2, opts->regs[Z80_SP], SZ_W); if (inst->reg == Z80_AF) { - mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); + zreg_to_native(opts, Z80_A, opts->gen.scratch1); shl_ir(code, 8, opts->gen.scratch1, SZ_W); mov_rdispr(code, opts->gen.context_reg, zf_off(ZF_S), opts->gen.scratch1, SZ_B); shl_ir(code, 1, opts->gen.scratch1, SZ_B); @@ -426,7 +463,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, bt_ir(code, 7, opts->gen.scratch1, SZ_W); setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_S)); shr_ir(code, 8, opts->gen.scratch1, SZ_W); - mov_rr(code, opts->gen.scratch1, opts->regs[Z80_A], SZ_B); + native_to_zreg(opts, opts->gen.scratch1, Z80_A); } else { translate_z80_reg(inst, &src_op, opts); mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W); @@ -443,9 +480,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, cycles(&opts->gen, num_cycles); if (inst->addr_mode == Z80_REG) { if(inst->reg == Z80_AF) { - mov_rr(code, opts->regs[Z80_A], opts->gen.scratch1, SZ_B); - mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->regs[Z80_A], SZ_B); + zreg_to_native(opts, Z80_A, opts->gen.scratch1); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_A), opts->gen.scratch2, SZ_B); mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_A), SZ_B); + native_to_zreg(opts, opts->gen.scratch2, Z80_A); //Flags are currently word aligned, so we can move //them efficiently a word at a time @@ -456,56 +494,91 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zf_off(f), SZ_W); } } else { - xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); + if (opts->regs[Z80_DE] >= 0 && opts->regs[Z80_HL] >= 0) { + xchg_rr(code, opts->regs[Z80_DE], opts->regs[Z80_HL], SZ_W); + } else { + zreg_to_native(opts, Z80_DE, opts->gen.scratch1); + zreg_to_native(opts, Z80_HL, 
opts->gen.scratch2); + native_to_zreg(opts, opts->gen.scratch1, Z80_HL); + native_to_zreg(opts, opts->gen.scratch2, Z80_DE); + } } } else { mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); call(code, opts->read_8); - xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); + if (opts->regs[inst->reg] >= 0) { + xchg_rr(code, opts->regs[inst->reg], opts->gen.scratch1, SZ_B); + } else { + zreg_to_native(opts, inst->reg, opts->gen.scratch2); + xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B); + native_to_zreg(opts, opts->gen.scratch2, inst->reg); + } mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); call(code, opts->write_8); cycles(&opts->gen, 1); uint8_t high_reg = z80_high_reg(inst->reg); - uint8_t use_reg; - //even though some of the upper halves can be used directly - //the limitations on mixing *H regs with the REX prefix - //prevent us from taking advantage of it - use_reg = opts->regs[inst->reg]; - ror_ir(code, 8, use_reg, SZ_W); mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch1, SZ_W); add_ir(code, 1, opts->gen.scratch1, SZ_W); call(code, opts->read_8); - xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); + if (opts->regs[inst->reg] >= 0) { + //even though some of the upper halves can be used directly + //the limitations on mixing *H regs with the REX prefix + //prevent us from taking advantage of it + uint8_t use_reg = opts->regs[inst->reg]; + ror_ir(code, 8, use_reg, SZ_W); + xchg_rr(code, use_reg, opts->gen.scratch1, SZ_B); + //restore reg to normal rotation + ror_ir(code, 8, use_reg, SZ_W); + } else { + zreg_to_native(opts, high_reg, opts->gen.scratch2); + xchg_rr(code, opts->gen.scratch1, opts->gen.scratch2, SZ_B); + native_to_zreg(opts, opts->gen.scratch2, high_reg); + } mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); add_ir(code, 1, opts->gen.scratch2, SZ_W); call(code, opts->write_8); - //restore reg to normal rotation - ror_ir(code, 8, use_reg, SZ_W); cycles(&opts->gen, 2); } break; case Z80_EXX: 
cycles(&opts->gen, 4); - mov_rr(code, opts->regs[Z80_BC], opts->gen.scratch1, SZ_W); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); - mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_C), opts->regs[Z80_BC], SZ_W); - mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_L), opts->regs[Z80_HL], SZ_W); - mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_C), SZ_W); - mov_rrdisp(code, opts->gen.scratch2, opts->gen.context_reg, zar_off(Z80_L), SZ_W); - mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch1, SZ_W); - mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_E), opts->regs[Z80_DE], SZ_W); - mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_E), SZ_W); + zreg_to_native(opts, Z80_BC, opts->gen.scratch1); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_BC), opts->gen.scratch2, SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_BC), SZ_W); + native_to_zreg(opts, opts->gen.scratch2, Z80_BC); + + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_HL), opts->gen.scratch2, SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_HL), SZ_W); + native_to_zreg(opts, opts->gen.scratch2, Z80_HL); + + zreg_to_native(opts, Z80_DE, opts->gen.scratch1); + mov_rdispr(code, opts->gen.context_reg, zar_off(Z80_DE), opts->gen.scratch2, SZ_W); + mov_rrdisp(code, opts->gen.scratch1, opts->gen.context_reg, zar_off(Z80_DE), SZ_W); + native_to_zreg(opts, opts->gen.scratch2, Z80_DE); break; case Z80_LDI: { cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); - mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_DE, opts->gen.scratch2); call(code, opts->write_8); cycles(&opts->gen, 2); - add_ir(code, 1, opts->regs[Z80_DE], SZ_W); - add_ir(code, 1, opts->regs[Z80_HL], SZ_W); - 
sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + if (opts->regs[Z80_DE] >= 0) { + add_ir(code, 1, opts->regs[Z80_DE], SZ_W); + } else { + add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); + } + if (opts->regs[Z80_HL] >= 0) { + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); + } else { + add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); + } + if (opts->regs[Z80_BC] >= 0) { + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); + } //TODO: Implement half-carry mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); @@ -513,14 +586,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } case Z80_LDIR: { cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); - mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_DE, opts->gen.scratch2); call(code, opts->write_8); - add_ir(code, 1, opts->regs[Z80_DE], SZ_W); - add_ir(code, 1, opts->regs[Z80_HL], SZ_W); - - sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + if (opts->regs[Z80_DE] >= 0) { + add_ir(code, 1, opts->regs[Z80_DE], SZ_W); + } else { + add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); + } + if (opts->regs[Z80_HL] >= 0) { + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); + } else { + add_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); + } + if (opts->regs[Z80_BC] >= 0) { + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); + } uint8_t * cont = code->cur+1; jcc(code, CC_Z, code->cur+2); cycles(&opts->gen, 7); @@ -536,14 +620,26 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } case Z80_LDD: { cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], 
opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); - mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_DE, opts->gen.scratch2); call(code, opts->write_8); cycles(&opts->gen, 2); - sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); - sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); - sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + if (opts->regs[Z80_DE] >= 0) { + sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); + } + if (opts->regs[Z80_HL] >= 0) { + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); + } + if (opts->regs[Z80_BC] >= 0) { + sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); + } //TODO: Implement half-carry mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); setcc_rdisp(code, CC_NZ, opts->gen.context_reg, zf_off(ZF_PV)); @@ -551,14 +647,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } case Z80_LDDR: { cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); - mov_rr(code, opts->regs[Z80_DE], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_DE, opts->gen.scratch2); call(code, opts->write_8); - sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); - sub_ir(code, 1, opts->regs[Z80_HL], SZ_W); - - sub_ir(code, 1, opts->regs[Z80_BC], SZ_W); + if (opts->regs[Z80_DE] >= 0) { + sub_ir(code, 1, opts->regs[Z80_DE], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_DE), SZ_W); + } + if (opts->regs[Z80_HL] >= 0) { + add_ir(code, 1, opts->regs[Z80_HL], SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_HL), SZ_W); + } + if (opts->regs[Z80_BC] >= 0) { + sub_ir(code, 1, opts->regs[Z80_BC], 
SZ_W); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_BC), SZ_W); + } uint8_t * cont = code->cur+1; jcc(code, CC_Z, code->cur+2); cycles(&opts->gen, 7); @@ -1342,7 +1449,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, jmp(code, call_dst); } else { if (inst->addr_mode == Z80_REG_INDIRECT) { - mov_rr(code, opts->regs[inst->ea_reg], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); } else { mov_ir(code, inst->immed, opts->gen.scratch1, SZ_W); } @@ -1973,7 +2080,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t #else memset(options->regs, -1, sizeof(options->regs)); options->regs[Z80_A] = RAX; - options->regx[Z80_SP] = RBX; + options->regs[Z80_SP] = RBX; options->gen.scratch1 = RCX; options->gen.scratch2 = RDX; @@ -2214,7 +2321,11 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t options->run = (z80_run_fun)code->cur; save_callee_save_regs(code); +#ifdef X86_64 mov_rr(code, RDI, options->gen.context_reg, SZ_PTR); +#else + mov_rdispr(code, RSP, 5 * sizeof(int32_t), options->gen.context_reg, SZ_PTR); +#endif call(code, options->load_context_scratch); cmp_irdisp(code, 0, options->gen.context_reg, offsetof(z80_context, extra_pc), SZ_PTR); code_ptr no_extra = code->cur+1; -- cgit v1.2.3 From b2aeff4d9434393ca621bd424d435f0fb98c8a3c Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 24 May 2015 15:05:18 -0700 Subject: More bugfixes for the 32-bit build of the Z80 core --- z80_to_x86.c | 231 +++++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 178 insertions(+), 53 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index a005369..98b6f49 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -109,7 +109,8 @@ void z80_save_reg(z80inst * inst, z80_options * opts) void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t read, uint8_t modify) { 
code_info *code = &opts->gen.code; - uint8_t size, reg, areg; + uint8_t size, areg; + int8_t reg; ea->mode = MODE_REG_DIRECT; areg = read ? opts->gen.scratch1 : opts->gen.scratch2; switch(inst->addr_mode & 0x1F) @@ -397,9 +398,18 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, mov_rr(code, src_op.base, dst_op.base, size); } } else if(src_op.mode == MODE_IMMED) { - mov_ir(code, src_op.disp, dst_op.base, size); + if(dst_op.mode == MODE_REG_DISPLACE8) { + mov_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, size); + } else { + mov_ir(code, src_op.disp, dst_op.base, size); + } } else { - mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); + if(dst_op.mode == MODE_REG_DISPLACE8) { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, size); + mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, size); + } else { + mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, size); + } } if (inst->ea_reg == Z80_I && inst->addr_mode == Z80_REG) { //ld a, i sets some flags @@ -696,10 +706,23 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, cycles(&opts->gen, num_cycles); translate_z80_reg(inst, &dst_op, opts); translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); - if (src_op.mode == MODE_REG_DIRECT) { - add_rr(code, src_op.base, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + if (src_op.mode == MODE_REG_DIRECT) { + add_rr(code, src_op.base, dst_op.base, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + add_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + add_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); + } } else { - add_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + if (src_op.mode == MODE_REG_DIRECT) { + add_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + add_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, 
z80_size(inst)); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); + add_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); + } } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -725,10 +748,23 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_reg(inst, &dst_op, opts); translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - if (src_op.mode == MODE_REG_DIRECT) { - adc_rr(code, src_op.base, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + if (src_op.mode == MODE_REG_DIRECT) { + adc_rr(code, src_op.base, dst_op.base, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + adc_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + adc_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); + } } else { - adc_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + if (src_op.mode == MODE_REG_DIRECT) { + adc_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + adc_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); + adc_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); + } } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -749,10 +785,23 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, cycles(&opts->gen, num_cycles); translate_z80_reg(inst, &dst_op, opts); translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); - if (src_op.mode == MODE_REG_DIRECT) { - sub_rr(code, src_op.base, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + if 
(src_op.mode == MODE_REG_DIRECT) { + sub_rr(code, src_op.base, dst_op.base, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + sub_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + sub_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); + } } else { - sub_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + if (src_op.mode == MODE_REG_DIRECT) { + sub_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + sub_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); + sub_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); + } } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -776,10 +825,23 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_reg(inst, &dst_op, opts); translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - if (src_op.mode == MODE_REG_DIRECT) { - sbb_rr(code, src_op.base, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + if (src_op.mode == MODE_REG_DIRECT) { + sbb_rr(code, src_op.base, dst_op.base, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + sbb_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); + } } else { - sbb_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + if (src_op.mode == MODE_REG_DIRECT) { + sbb_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, z80_size(inst)); + } else if (src_op.mode == MODE_IMMED) { + sbb_irdisp(code, src_op.disp, dst_op.base, dst_op.disp, z80_size(inst)); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, z80_size(inst)); + sbb_rrdisp(code, 
opts->gen.scratch1, dst_op.base, dst_op.disp, z80_size(inst)); + } } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -804,8 +866,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { and_rr(code, src_op.base, dst_op.base, z80_size(inst)); - } else { + } else if (src_op.mode == MODE_IMMED) { and_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + and_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); @@ -833,8 +897,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { or_rr(code, src_op.base, dst_op.base, z80_size(inst)); - } else { + } else if (src_op.mode == MODE_IMMED) { or_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + or_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); @@ -862,8 +928,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { xor_rr(code, src_op.base, dst_op.base, z80_size(inst)); - } else { + } else if (src_op.mode == MODE_IMMED) { xor_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + xor_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); } //TODO: Cleanup flags setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); @@ -889,8 +957,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_ea(inst, &src_op, opts, READ, DONT_MODIFY); if (src_op.mode == MODE_REG_DIRECT) { 
cmp_rr(code, src_op.base, dst_op.base, z80_size(inst)); - } else { + } else if (src_op.mode == MODE_IMMED) { cmp_ir(code, src_op.disp, dst_op.base, z80_size(inst)); + } else { + cmp_rdispr(code, src_op.base, src_op.disp, dst_op.base, z80_size(inst)); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -915,7 +985,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, if (dst_op.mode == MODE_UNUSED) { translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); } - add_ir(code, 1, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + add_ir(code, 1, dst_op.base, z80_size(inst)); + } else { + add_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst)); + } if (z80_size(inst) == SZ_B) { mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag @@ -941,7 +1015,12 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, if (dst_op.mode == MODE_UNUSED) { translate_z80_ea(inst, &dst_op, opts, READ, MODIFY); } - sub_ir(code, 1, dst_op.base, z80_size(inst)); + if (dst_op.mode == MODE_REG_DIRECT) { + sub_ir(code, 1, dst_op.base, z80_size(inst)); + } else { + sub_irdisp(code, 1, dst_op.base, dst_op.disp, z80_size(inst)); + } + if (z80_size(inst) == SZ_B) { mov_irdisp(code, 1, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag @@ -1029,19 +1108,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, src_op.mode = MODE_UNUSED; translate_z80_reg(inst, &dst_op, opts); } - rol_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + rol_ir(code, 1, dst_op.base, SZ_B); + } else { + rol_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, 
dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (inst->immed) { //rlca does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); - setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } if (inst->addr_mode != Z80_UNUSED) { z80_save_result(opts, inst); @@ -1064,19 +1149,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_reg(inst, &dst_op, opts); } bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - rcl_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + rcl_ir(code, 1, dst_op.base, SZ_B); + } else { + rcl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (inst->immed) { //rla does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); - setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, 
opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } if (inst->addr_mode != Z80_UNUSED) { z80_save_result(opts, inst); @@ -1098,19 +1189,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, src_op.mode = MODE_UNUSED; translate_z80_reg(inst, &dst_op, opts); } - ror_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + ror_ir(code, 1, dst_op.base, SZ_B); + } else { + ror_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (inst->immed) { //rrca does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); - setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } if (inst->addr_mode != Z80_UNUSED) { z80_save_result(opts, inst); @@ -1133,19 +1230,25 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, translate_z80_reg(inst, &dst_op, opts); } bt_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_C), SZ_B); - rcr_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + rcr_ir(code, 1, dst_op.base, SZ_B); + } else { + rcr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, 
dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag if (inst->immed) { //rra does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); - setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); - setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); - setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); + cmp_ir(code, 0, dst_op.base, SZ_B); + setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); + setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); + setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } if (inst->addr_mode != Z80_UNUSED) { z80_save_result(opts, inst); @@ -1168,13 +1271,19 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, src_op.mode = MODE_UNUSED; translate_z80_reg(inst, &dst_op, opts); } - shl_ir(code, 1, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + shl_ir(code, 1, dst_op.base, SZ_B); + } else { + shl_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); if (inst->op == Z80_SLL) { or_ir(code, 1, dst_op.base, SZ_B); } - if (src_op.mode != MODE_UNUSED) { + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag @@ -1202,9 +1311,15 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, src_op.mode = MODE_UNUSED; translate_z80_reg(inst, &dst_op, opts); } - sar_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + 
sar_ir(code, 1, dst_op.base, SZ_B); + } else { + sar_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -1233,9 +1348,15 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, src_op.mode = MODE_UNUSED; translate_z80_reg(inst, &dst_op, opts); } - shr_ir(code, 1, dst_op.base, SZ_B); - if (src_op.mode != MODE_UNUSED) { + if (dst_op.mode == MODE_REG_DIRECT) { + shr_ir(code, 1, dst_op.base, SZ_B); + } else { + shr_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } + if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); + } else if(src_op.mode == MODE_REG_DISPLACE8) { + mov_rrdisp(code, dst_op.base, src_op.base, src_op.disp, SZ_B); } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); @@ -1542,7 +1663,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } case Z80_DJNZ: { cycles(&opts->gen, 8);//T States: 5,3 - sub_ir(code, 1, opts->regs[Z80_B], SZ_B); + if (opts->regs[Z80_B] >= 0) { + sub_ir(code, 1, opts->regs[Z80_B], SZ_B); + } else { + sub_irdisp(code, 1, opts->gen.context_reg, zr_off(Z80_B), SZ_B); + } uint8_t *no_jump_off = code->cur+1; jcc(code, CC_Z, code->cur+2); cycles(&opts->gen, 5);//T States: 5 @@ -2311,7 +2436,7 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t pop_r(code, options->gen.scratch2); call(code, options->gen.save_context); //adjust pointer before move and call instructions that got us here - sub_ir(code, 11, options->gen.scratch2, SZ_PTR); + sub_ir(code, options->gen.scratch1 >= R8 ? 
11 : 10, options->gen.scratch2, SZ_PTR); push_r(code, options->gen.context_reg); call_args(code, (code_ptr)z80_retranslate_inst, 3, options->gen.scratch1, options->gen.context_reg, options->gen.scratch2); pop_r(code, options->gen.context_reg); -- cgit v1.2.3 From 64a79c95cbbb23079d0da1eca89c3a676725dfae Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sun, 24 May 2015 21:11:18 -0700 Subject: Z80 test cases that passed on 64-bit now pass on 32-bit --- z80_to_x86.c | 231 ++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 150 insertions(+), 81 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 98b6f49..51f855a 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -35,6 +35,50 @@ uint8_t z80_size(z80inst * inst) return SZ_B; } +uint8_t zf_off(uint8_t flag) +{ + return offsetof(z80_context, flags) + flag; +} + +uint8_t zaf_off(uint8_t flag) +{ + return offsetof(z80_context, alt_flags) + flag; +} + +uint8_t zr_off(uint8_t reg) +{ + if (reg > Z80_A) { + reg = z80_low_reg(reg); + } + return offsetof(z80_context, regs) + reg; +} + +uint8_t zar_off(uint8_t reg) +{ + if (reg > Z80_A) { + reg = z80_low_reg(reg); + } + return offsetof(z80_context, alt_regs) + reg; +} + +void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg) +{ + if (opts->regs[reg] >= 0) { + mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? SZ_W : SZ_B); + } else { + mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B); + } +} + +void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg) +{ + if (opts->regs[reg] >= 0) { + mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B); + } else { + mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? 
SZ_W : SZ_B); + } +} + void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) { code_info *code = &opts->gen.code; @@ -45,7 +89,7 @@ void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) ea->mode = MODE_UNUSED; } else { ea->mode = MODE_REG_DIRECT; - if (inst->reg == Z80_IYH) { + if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); ror_ir(code, 8, opts->gen.scratch1, SZ_W); @@ -73,7 +117,7 @@ void translate_z80_reg(z80inst * inst, host_ea * ea, z80_options * opts) } else { ea->mode = MODE_REG_DISPLACE8; ea->base = opts->gen.context_reg; - ea->disp = offsetof(z80_context, regs) + inst->reg; + ea->disp = zr_off(inst->reg); } } } @@ -84,7 +128,7 @@ void z80_save_reg(z80inst * inst, z80_options * opts) if (inst->reg == Z80_USE_IMMED || inst->reg == Z80_UNUSED) { return; } - if (inst->reg == Z80_IYH) { + if (inst->reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { if ((inst->addr_mode & 0x1F) == Z80_REG && inst->ea_reg == Z80_IYL) { ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); @@ -116,7 +160,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t switch(inst->addr_mode & 0x1F) { case Z80_REG: - if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IY] >= 0) { + if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { if (inst->reg == Z80_IYL) { mov_rr(code, opts->regs[Z80_IY], opts->gen.scratch1, SZ_W); ror_ir(code, 8, opts->gen.scratch1, SZ_W); @@ -140,15 +184,11 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t } else { ea->mode = MODE_REG_DISPLACE8; ea->base = opts->gen.context_reg; - ea->disp = offsetof(z80_context, regs) + inst->ea_reg; + ea->disp = zr_off(inst->ea_reg); } break; case Z80_REG_INDIRECT: - if (opts->regs[inst->ea_reg] >= 0) { - mov_rr(code, opts->regs[inst->ea_reg], areg, 
SZ_W); - } else { - mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + z80_low_reg(inst->ea_reg), areg, SZ_W); - } + zreg_to_native(opts, inst->ea_reg, areg); size = z80_size(inst); if (read) { if (modify) { @@ -192,12 +232,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t break; case Z80_IX_DISPLACE: case Z80_IY_DISPLACE: - reg = opts->regs[(inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY]; - if (reg >= 0) { - mov_rr(code, reg, areg, SZ_W); - } else { - mov_rdispr(code, opts->gen.context_reg, offsetof(z80_context, regs) + (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IXL : Z80_IYL, areg, SZ_W); - } + zreg_to_native(opts, (inst->addr_mode & 0x1F) == Z80_IX_DISPLACE ? Z80_IX : Z80_IY, areg); add_ir(code, inst->ea_reg & 0x80 ? inst->ea_reg - 256 : inst->ea_reg, areg, SZ_W); size = z80_size(inst); if (read) { @@ -229,7 +264,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t void z80_save_ea(code_info *code, z80inst * inst, z80_options * opts) { if ((inst->addr_mode & 0x1F) == Z80_REG) { - if (inst->ea_reg == Z80_IYH) { + if (inst->ea_reg == Z80_IYH && opts->regs[Z80_IYL] >= 0) { if (inst->reg == Z80_IYL) { ror_ir(code, 8, opts->regs[Z80_IY], SZ_W); mov_rr(code, opts->gen.scratch1, opts->regs[Z80_IYL], SZ_B); @@ -275,50 +310,6 @@ enum { MODIFY }; -uint8_t zf_off(uint8_t flag) -{ - return offsetof(z80_context, flags) + flag; -} - -uint8_t zaf_off(uint8_t flag) -{ - return offsetof(z80_context, alt_flags) + flag; -} - -uint8_t zr_off(uint8_t reg) -{ - if (reg > Z80_A) { - reg = z80_low_reg(reg); - } - return offsetof(z80_context, regs) + reg; -} - -uint8_t zar_off(uint8_t reg) -{ - if (reg > Z80_A) { - reg = z80_low_reg(reg); - } - return offsetof(z80_context, alt_regs) + reg; -} - -void zreg_to_native(z80_options *opts, uint8_t reg, uint8_t native_reg) -{ - if (opts->regs[reg] >= 0) { - mov_rr(&opts->gen.code, opts->regs[reg], native_reg, reg > Z80_A ? 
SZ_W : SZ_B); - } else { - mov_rdispr(&opts->gen.code, opts->gen.context_reg, zr_off(reg), native_reg, reg > Z80_A ? SZ_W : SZ_B); - } -} - -void native_to_zreg(z80_options *opts, uint8_t native_reg, uint8_t reg) -{ - if (opts->regs[reg] >= 0) { - mov_rr(&opts->gen.code, native_reg, opts->regs[reg], reg > Z80_A ? SZ_W : SZ_B); - } else { - mov_rrdisp(&opts->gen.code, native_reg, opts->gen.context_reg, zr_off(reg), reg > Z80_A ? SZ_W : SZ_B); - } -} - void z80_print_regs_exit(z80_context * context) { printf("A: %X\nB: %X\nC: %X\nD: %X\nE: %X\nHL: %X\nIX: %X\nIY: %X\nSP: %X\n\nIM: %d, IFF1: %d, IFF2: %d\n", @@ -445,8 +436,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, shl_ir(code, 1, opts->gen.scratch1, SZ_B); or_rdispr(code, opts->gen.context_reg, zf_off(ZF_C), opts->gen.scratch1, SZ_B); } else { - translate_z80_reg(inst, &src_op, opts); - mov_rr(code, src_op.base, opts->gen.scratch1, SZ_W); + zreg_to_native(opts, inst->reg, opts->gen.scratch1); } mov_rr(code, opts->regs[Z80_SP], opts->gen.scratch2, SZ_W); call(code, opts->write_16_highfirst); @@ -475,8 +465,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, shr_ir(code, 8, opts->gen.scratch1, SZ_W); native_to_zreg(opts, opts->gen.scratch1, Z80_A); } else { - translate_z80_reg(inst, &src_op, opts); - mov_rr(code, opts->gen.scratch1, src_op.base, SZ_W); + native_to_zreg(opts, opts->gen.scratch1, inst->reg); } //no call to save_z80_reg needed since there's no chance we'll use the only //the upper half of a register pair @@ -1123,7 +1112,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //TODO: Implement half-carry flag if (inst->immed) { //rlca does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, 
zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1164,7 +1157,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //TODO: Implement half-carry flag if (inst->immed) { //rla does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1204,7 +1201,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //TODO: Implement half-carry flag if (inst->immed) { //rrca does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1245,7 +1246,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //TODO: Implement half-carry flag if (inst->immed) { //rra does not set these flags - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1278,7 +1283,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); if 
(inst->op == Z80_SLL) { - or_ir(code, 1, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + or_ir(code, 1, dst_op.base, SZ_B); + } else { + or_irdisp(code, 1, dst_op.base, dst_op.disp, SZ_B); + } } if (src_op.mode == MODE_REG_DIRECT) { mov_rr(code, dst_op.base, src_op.base, SZ_B); @@ -1287,7 +1296,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1324,7 +1337,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1361,7 +1378,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, setcc_rdisp(code, CC_C, opts->gen.context_reg, zf_off(ZF_C)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); //TODO: Implement half-carry flag - cmp_ir(code, 0, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, dst_op.base, SZ_B); + } else { + cmp_irdisp(code, 0, dst_op.base, dst_op.disp, SZ_B); + } setcc_rdisp(code, CC_P, 
opts->gen.context_reg, zf_off(ZF_PV)); setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); @@ -1448,12 +1469,20 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //Reads normally take 3 cycles, but the read at the end of a bit instruction takes 4 cycles(&opts->gen, 1); } - bt_ir(code, bit, src_op.base, size); + if (src_op.mode == MODE_REG_DIRECT) { + bt_ir(code, bit, src_op.base, size); + } else { + bt_irdisp(code, bit, src_op.base, src_op.disp, size); + } setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_NC, opts->gen.context_reg, zf_off(ZF_PV)); mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_N), SZ_B); if (inst->immed == 7) { - cmp_ir(code, 0, src_op.base, size); + if (src_op.mode == MODE_REG_DIRECT) { + cmp_ir(code, 0, src_op.base, size); + } else { + cmp_irdisp(code, 0, src_op.base, src_op.disp, size); + } setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); } else { mov_irdisp(code, 0, opts->gen.context_reg, zf_off(ZF_S), SZ_B); @@ -1480,7 +1509,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 cycles(&opts->gen, 1); } - bts_ir(code, bit, src_op.base, size); + if (src_op.mode == MODE_REG_DIRECT) { + bts_ir(code, bit, src_op.base, size); + } else { + bts_irdisp(code, bit, src_op.base, src_op.disp, size); + } if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { #ifdef X86_64 @@ -1490,12 +1523,28 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, ror_ir(code, 8, src_op.base, SZ_W); } else { #endif - mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + zreg_to_native(opts, inst->ea_reg, dst_op.base); + } else { + zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); + mov_rrdisp(code, opts->gen.scratch1, 
dst_op.base, dst_op.disp, SZ_B); + } #ifdef X86_64 } #endif } else { - mov_rr(code, src_op.base, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + if (src_op.mode == MODE_REG_DIRECT) { + mov_rr(code, src_op.base, dst_op.base, SZ_B); + } else { + mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B); + } + } else if (src_op.mode == MODE_REG_DIRECT) { + mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); + mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); + } } } if ((inst->addr_mode & 0x1F) != Z80_REG) { @@ -1526,7 +1575,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, //Reads normally take 3 cycles, but the read in the middle of a set instruction takes 4 cycles(&opts->gen, 1); } - btr_ir(code, bit, src_op.base, size); + if (src_op.mode == MODE_REG_DIRECT) { + btr_ir(code, bit, src_op.base, size); + } else { + btr_irdisp(code, bit, src_op.base, src_op.disp, size); + } if (inst->reg != Z80_USE_IMMED) { if (size == SZ_W) { #ifdef X86_64 @@ -1536,12 +1589,28 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, ror_ir(code, 8, src_op.base, SZ_W); } else { #endif - mov_rr(code, opts->regs[inst->ea_reg], dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + zreg_to_native(opts, inst->ea_reg, dst_op.base); + } else { + zreg_to_native(opts, inst->ea_reg, opts->gen.scratch1); + mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); + } #ifdef X86_64 } #endif } else { - mov_rr(code, src_op.base, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + if (src_op.mode == MODE_REG_DIRECT) { + mov_rr(code, src_op.base, dst_op.base, SZ_B); + } else { + mov_rdispr(code, src_op.base, src_op.disp, dst_op.base, SZ_B); + } + } else if (src_op.mode == MODE_REG_DIRECT) { + mov_rrdisp(code, src_op.base, dst_op.base, dst_op.disp, SZ_B); + } else { + 
mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); + mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); + } } } if (inst->addr_mode != Z80_REG) { -- cgit v1.2.3 From 86bed7b5415c0f2ceef4a3b043eca445b3b4f0ec Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Mon, 25 May 2015 17:08:56 -0700 Subject: Fix RLD and RRD for the case in which HL does not map to a native register --- z80_to_x86.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 51f855a..4cb7f2a 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1397,11 +1397,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, break; case Z80_RLD: cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x24, A = 0x31 - mov_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B); + zreg_to_native(opts, Z80_A, opts->gen.scratch2); shl_ir(code, 4, opts->gen.scratch1, SZ_W); and_ir(code, 0xF, opts->gen.scratch2, SZ_W); and_ir(code, 0xFFF, opts->gen.scratch1, SZ_W); @@ -1418,17 +1418,17 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch2); ror_ir(code, 8, opts->gen.scratch1, SZ_W); call(code, opts->write_8); break; case Z80_RRD: cycles(&opts->gen, 8); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch1, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch1); call(code, opts->read_8); //Before: (HL) = 0x12, A = 0x34 //After: (HL) = 0x41, A = 0x32 - movzx_rr(code, opts->regs[Z80_A], opts->gen.scratch2, SZ_B, SZ_W); + zreg_to_native(opts, Z80_A, 
opts->gen.scratch2); ror_ir(code, 4, opts->gen.scratch1, SZ_W); shl_ir(code, 4, opts->gen.scratch2, SZ_W); and_ir(code, 0xF00F, opts->gen.scratch1, SZ_W); @@ -1448,7 +1448,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, setcc_rdisp(code, CC_Z, opts->gen.context_reg, zf_off(ZF_Z)); setcc_rdisp(code, CC_S, opts->gen.context_reg, zf_off(ZF_S)); - mov_rr(code, opts->regs[Z80_HL], opts->gen.scratch2, SZ_W); + zreg_to_native(opts, Z80_HL, opts->gen.scratch2); ror_ir(code, 8, opts->gen.scratch1, SZ_W); call(code, opts->write_8); break; -- cgit v1.2.3 From a75fc6b803164c251b62a941a4544f71a2e86892 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Mon, 25 May 2015 18:56:22 -0700 Subject: Add a define in both the source and Makefile for enabling logging of z80 instruction address/cycle counts. Fix Z80 in/out instructions to eliminate assumptions about which registers are stored in native regs. Fix read_16 to not corrupt the low byte when the read has to call into a C function. 
--- z80_to_x86.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 4cb7f2a..d1d26a9 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -342,7 +342,9 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, if (context->breakpoint_flags[address / sizeof(uint8_t)] & (1 << (address % sizeof(uint8_t)))) { zbreakpoint_patch(context, address, start); } - //log_address(&opts->gen, address, "Z80: %X @ %d\n"); +#ifdef Z80_LOG_ADDRESS + log_address(&opts->gen, address, "Z80: %X @ %d\n"); +#endif } switch(inst->op) { @@ -1897,7 +1899,11 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, } call(code, opts->read_io); translate_z80_reg(inst, &dst_op, opts); - mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B); + if (dst_op.mode == MODE_REG_DIRECT) { + mov_rr(code, opts->gen.scratch1, dst_op.base, SZ_B); + } else { + mov_rrdisp(code, opts->gen.scratch1, dst_op.base, dst_op.disp, SZ_B); + } z80_save_reg(inst, opts); break; /*case Z80_INI: @@ -1909,10 +1915,17 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, if ((inst->addr_mode & 0x1F) == Z80_IMMED_INDIRECT) { mov_ir(code, inst->immed, opts->gen.scratch2, SZ_B); } else { + zreg_to_native(opts, Z80_C, opts->gen.scratch2); mov_rr(code, opts->regs[Z80_C], opts->gen.scratch2, SZ_B); } translate_z80_reg(inst, &src_op, opts); - mov_rr(code, dst_op.base, opts->gen.scratch1, SZ_B); + if (src_op.mode == MODE_REG_DIRECT) { + mov_rr(code, src_op.base, opts->gen.scratch1, SZ_B); + } else if (src_op.mode == MODE_IMMED) { + mov_ir(code, src_op.disp, opts->gen.scratch1, SZ_B); + } else { + mov_rdispr(code, src_op.base, src_op.disp, opts->gen.scratch1, SZ_B); + } call(code, opts->write_io); z80_save_reg(inst, opts); break; @@ -2459,13 +2472,21 @@ void init_z80_opts(z80_options * options, memmap_chunk const * chunks, uint32_t push_r(code, 
options->gen.scratch1); call(code, options->read_8_noinc); mov_rr(code, options->gen.scratch1, options->gen.scratch2, SZ_B); +#ifndef X86_64 + //scratch 2 is a caller save register in 32-bit builds and may be clobbered by something called from the read8 fun + mov_rrdisp(code, options->gen.scratch1, options->gen.context_reg, offsetof(z80_context, scratch2), SZ_B); +#endif pop_r(code, options->gen.scratch1); add_ir(code, 1, options->gen.scratch1, SZ_W); cycles(&options->gen, 3); check_cycles(&options->gen); call(code, options->read_8_noinc); shl_ir(code, 8, options->gen.scratch1, SZ_W); +#ifdef X86_64 mov_rr(code, options->gen.scratch2, options->gen.scratch1, SZ_B); +#else + mov_rdispr(code, options->gen.context_reg, offsetof(z80_context, scratch2), options->gen.scratch1, SZ_B); +#endif retn(code); options->write_16_highfirst = code->cur; -- cgit v1.2.3 From 81fed3292d80883b76202cf9de913cea60f44627 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Tue, 26 May 2015 20:00:50 -0700 Subject: Fixes for the 32-bit build accidentally introduced a bug into the 64-bit build, this commit fixes the regression --- z80_to_x86.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index d1d26a9..1474da9 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1460,6 +1460,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; + src_op.mode = MODE_REG_DIRECT; size = SZ_W; bit = inst->immed + 8; } else { @@ -1497,6 +1498,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; + src_op.mode = MODE_REG_DIRECT; size = SZ_W; bit = 
inst->immed + 8; } else { @@ -1563,6 +1565,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t bit; if ((inst->addr_mode & 0x1F) == Z80_REG && opts->regs[inst->ea_reg] >= AH && opts->regs[inst->ea_reg] <= BH) { src_op.base = opts->regs[z80_word_reg(inst->ea_reg)]; + src_op.mode = MODE_REG_DIRECT; size = SZ_W; bit = inst->immed + 8; } else { -- cgit v1.2.3 From 41427a5cbad6eae33ec644d38227fc7301d7a8c6 Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 27 Jun 2015 12:17:18 -0700 Subject: Cleanup some warnings under clang through a combination of code fixes and supressing specific warnings --- z80_to_x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 1474da9..23b19b8 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -1951,7 +1951,7 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) { if (!context->interp_code[opcode]) { - if (opcode == 0xCB || (opcode >= 0xDD && opcode & 0xF == 0xD)) { + if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) { fprintf(stderr, "Encountered prefix byte %X at address %X. Z80 interpeter doesn't support those yet.", opcode, context->pc); exit(1); } -- cgit v1.2.3 From 80ff833dd8ad011b579bff26ac654819e6735bce Mon Sep 17 00:00:00 2001 From: Michael Pavone Date: Sat, 25 Jul 2015 18:22:07 -0700 Subject: Use a new fatal_error function instead of calling fprintf and exit for fatal errors. This new function more gracefully handles the case in which BlastEm was not started from a terminal or disconnected from ther terminal (Windows). 
--- z80_to_x86.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'z80_to_x86.c') diff --git a/z80_to_x86.c b/z80_to_x86.c index 23b19b8..7205d16 100644 --- a/z80_to_x86.c +++ b/z80_to_x86.c @@ -7,6 +7,7 @@ #include "z80_to_x86.h" #include "gen_x86.h" #include "mem.h" +#include "util.h" #include #include #include @@ -256,8 +257,7 @@ void translate_z80_ea(z80inst * inst, host_ea * ea, z80_options * opts, uint8_t ea->mode = MODE_UNUSED; break; default: - fprintf(stderr, "Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F); - exit(1); + fatal_error("Unrecognized Z80 addressing mode %d\n", inst->addr_mode & 0x1F); } } @@ -1939,11 +1939,10 @@ void translate_z80inst(z80inst * inst, z80_context * context, uint16_t address, default: { char disbuf[80]; z80_disasm(inst, disbuf, address); - fprintf(stderr, "unimplemented instruction: %s at %X\n", disbuf, address); FILE * f = fopen("zram.bin", "wb"); fwrite(context->mem_pointers[0], 1, 8 * 1024, f); fclose(f); - exit(1); + fatal_error("unimplemented Z80 instruction: %s at %X\nZ80 RAM has been saved to zram.bin for debugging", disbuf, address); } } } @@ -1952,8 +1951,7 @@ uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) { if (!context->interp_code[opcode]) { if (opcode == 0xCB || (opcode >= 0xDD && (opcode & 0xF) == 0xD)) { - fprintf(stderr, "Encountered prefix byte %X at address %X. Z80 interpeter doesn't support those yet.", opcode, context->pc); - exit(1); + fatal_error("Encountered prefix byte %X at address %X. Z80 interpeter doesn't support those yet.", opcode, context->pc); } uint8_t codebuf[8]; memset(codebuf, 0, sizeof(codebuf)); @@ -1961,8 +1959,7 @@ uint8_t * z80_interp_handler(uint8_t opcode, z80_context * context) z80inst inst; uint8_t * after = z80_decode(codebuf, &inst); if (after - codebuf > 1) { - fprintf(stderr, "Encountered multi-byte Z80 instruction at %X. 
Z80 interpeter doesn't support those yet.", context->pc); - exit(1); + fatal_error("Encountered multi-byte Z80 instruction at %X. Z80 interpeter doesn't support those yet.", context->pc); } z80_options * opts = context->options; @@ -2243,7 +2240,7 @@ void translate_z80_stream(z80_context * context, uint32_t address) if (opts->gen.deferred) { address = opts->gen.deferred->address; dprintf("defferred address: %X\n", address); - } + } } while (opts->gen.deferred); } @@ -2749,6 +2746,6 @@ void zremove_breakpoint(z80_context * context, uint16_t address) opts->gen.code.last = native + 16; check_cycles_int(&opts->gen, address); opts->gen.code = tmp_code; -} + } } -- cgit v1.2.3