author | Michael Pavone <pavone@retrodev.com> | 2016-12-14 23:26:12 -0800 |
---|---|---|
committer | Michael Pavone <pavone@retrodev.com> | 2016-12-14 23:26:12 -0800 |
commit | 792a18ecdd66ae00defa7d295734898df7fed728 (patch) | |
tree | 4642f70614c350e588effeaaf71be44425f5f91e /backend_x86.c | |
parent | 89c0bad42f218afd9e4e7719dd3da65f1fe3ff55 (diff) | |
Fix a subtle bug in interrupt handling introduced with the move to a single cycle register in the Z80 core. Fixes a regression in Puyo Puyo 2.
Diffstat (limited to 'backend_x86.c')
-rw-r--r-- | backend_x86.c | 14 |
1 file changed, 10 insertions, 4 deletions
```diff
diff --git a/backend_x86.c b/backend_x86.c
index fcf01cd..9f09466 100644
--- a/backend_x86.c
+++ b/backend_x86.c
@@ -13,13 +13,16 @@ void cycles(cpu_options *opts, uint32_t num)
 void check_cycles_int(cpu_options *opts, uint32_t address)
 {
 	code_info *code = &opts->code;
+	uint8_t cc;
 	if (opts->limit < 0) {
-		or_rr(code, opts->cycles, opts->cycles, SZ_D);
+		cmp_ir(code, 1, opts->cycles, SZ_D);
+		cc = CC_NS;
 	} else {
 		cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+		cc = CC_A;
 	}
 	code_ptr jmp_off = code->cur+1;
-	jcc(code, CC_NS, jmp_off+1);
+	jcc(code, cc, jmp_off+1);
 	mov_ir(code, address, opts->scratch1, SZ_D);
 	call(code, opts->handle_cycle_limit_int);
 	*jmp_off = code->cur - (jmp_off+1);
@@ -28,14 +31,17 @@ void check_cycles_int(cpu_options *opts, uint32_t address)
 void check_cycles(cpu_options * opts)
 {
 	code_info *code = &opts->code;
+	uint8_t cc;
 	if (opts->limit < 0) {
-		or_rr(code, opts->cycles, opts->cycles, SZ_D);
+		cmp_ir(code, 1, opts->cycles, SZ_D);
+		cc = CC_NS;
 	} else {
 		cmp_rr(code, opts->cycles, opts->limit, SZ_D);
+		cc = CC_A;
 	}
 	check_alloc_code(code, MAX_INST_LEN*2);
 	code_ptr jmp_off = code->cur+1;
-	jcc(code, CC_NS, jmp_off+1);
+	jcc(code, cc, jmp_off+1);
 	call(code, opts->handle_cycle_limit);
 	*jmp_off = code->cur - (jmp_off+1);
 }
```
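The shape of the change is easiest to see restated as plain C. The sketch below is a hypothetical helper (`skip_interrupt_check` is not a real function in the codebase), not the JIT emitter itself, and it assumes the backend's compare helpers set flags as "second operand minus first" (an inference from the CC_NS/CC_A choices, not something stated in the diff):

```c
#include <stdint.h>

/* Hypothetical restatement of the patched cycle check. The generated
 * code jumps over the cycle-limit handler call while the condition
 * returned here is true; operand-order semantics are inferred, not
 * confirmed by the diff. */
static int skip_interrupt_check(int32_t cycles, int32_t limit)
{
	if (limit < 0) {
		/* Old code tested the sign of the cycle register (or_rr + CC_NS),
		 * so the handler only ran once the count went negative. Comparing
		 * against 1 means the handler also runs when the count is exactly 0. */
		return cycles >= 1;
	} else {
		/* Old code skipped the handler on a signed "limit >= cycles"
		 * (cmp_rr + CC_NS). CC_A makes the skip condition strict and
		 * unsigned, so the handler also runs when cycles == limit. */
		return (uint32_t)limit > (uint32_t)cycles;
	}
}
```

Under that reading, the fix is a boundary condition: the interrupt check now also fires on the exact target cycle rather than only after it has been passed, which is consistent with the commit message about a subtle interrupt-handling regression after the move to a single cycle register.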