/*
Copyright 2013 Michael Pavone
This file is part of BlastEm.
BlastEm is free software distributed under the terms of the GNU General Public License version 3 or greater. See COPYING for full license text.
*/
#include "backend.h"
#include <stdlib.h>
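//Prepends an entry for a guest address that has no native translation yet.
//dest points at the 4-byte displacement field that should be patched once
//code exists for the (24-bit truncated) address. Returns the new list head.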
deferred_addr * defer_address(deferred_addr * old_head, uint32_t address, uint8_t *dest)
{
	deferred_addr * new_head = malloc(sizeof(deferred_addr));
	new_head->next = old_head;
	new_head->address = address & 0xFFFFFF;
	new_head->dest = dest;
	return new_head;
}
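//Frees entries from the front of the list until remove_to is reached,
//leaving *head_ptr pointing at remove_to (or NULL if it was not found)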
void remove_deferred_until(deferred_addr **head_ptr, deferred_addr * remove_to)
{
	for(deferred_addr *cur = *head_ptr; cur && cur != remove_to; cur = *head_ptr)
	{
		*head_ptr = cur->next;
		free(cur);
	}
}
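//Walks the deferred list, patching and unlinking every entry whose address
//now has a native translation; unresolved entries stay on the list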
void process_deferred(deferred_addr ** head_ptr, void * context, native_addr_func get_native)
{
	deferred_addr * cur = *head_ptr;
	deferred_addr **last_next = head_ptr;
	while(cur)
	{
		code_ptr native = get_native(context, cur->address);
		if (native) {
			//patch the 4-byte displacement at dest; it is relative to the
			//end of the field, hence the +4
			int32_t disp = native - (cur->dest + 4);
			code_ptr out = cur->dest;
			//write the displacement a byte at a time, least significant first
			*(out++) = disp;
			disp >>= 8;
			*(out++) = disp;
			disp >>= 8;
			*(out++) = disp;
			disp >>= 8;
			*out = disp;
			//unlink and free the patched entry
			*last_next = cur->next;
			free(cur);
			cur = *last_next;
		} else {
			//no native code for this address yet; leave it on the list
			last_next = &(cur->next);
			cur = cur->next;
		}
	}
}
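/*
Illustrative usage, as a sketch only (everything here other than
defer_address and process_deferred is a hypothetical name):

	//while emitting a branch to a guest address with no translation yet,
	//reserve 4 bytes for the displacement and record their location
	code_ptr disp_field = code->cur;
	code->cur += 4;
	deferred = defer_address(deferred, guest_addr, disp_field);
	//...after more code has been compiled, patch whatever is now
	//resolvable; lookup_native is whichever native_addr_func the
	//backend provides
	process_deferred(&deferred, context, lookup_native);
*/
//Returns the memmap chunk containing address, or NULL if it is unmapped.
//When size_sum is non-NULL, it receives the summed sizes of all earlier
//chunks whose flags contain every bit in flags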
memmap_chunk const *find_map_chunk(uint32_t address, cpu_options *opts, uint16_t flags, uint32_t *size_sum)
{
	if (size_sum) {
		*size_sum = 0;
	}
	address &= opts->address_mask;
	for (memmap_chunk const *cur = opts->memmap, *end = opts->memmap + opts->memmap_chunks; cur != end; cur++)
	{
		if (address >= cur->start && address < cur->end) {
			return cur;
		} else if (size_sum && (cur->flags & flags) == flags) {
			*size_sum += chunk_size(opts, cur);
		}
	}
	return NULL;
}
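//Translates a guest address into a directly readable host pointer, or NULL
//if the address is unmapped, not flagged MMAP_READ, or has no backing buffer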
void * get_native_pointer(uint32_t address, void ** mem_pointers, cpu_options * opts)
{
	memmap_chunk const * memmap = opts->memmap;
	address &= opts->address_mask;
	for (uint32_t chunk = 0; chunk < opts->memmap_chunks; chunk++)
	{
		if (address >= memmap[chunk].start && address < memmap[chunk].end) {
			if (!(memmap[chunk].flags & MMAP_READ)) {
				return NULL;
			}
			uint8_t * base = memmap[chunk].flags & MMAP_PTR_IDX
				? mem_pointers[memmap[chunk].ptr_index]
				: memmap[chunk].buffer;
			if (!base) {
				//no direct pointer; fall back to the auxiliary buffer if present
				if (memmap[chunk].flags & MMAP_AUX_BUFF) {
					return memmap[chunk].buffer + (address & memmap[chunk].aux_mask);
				}
				return NULL;
			}
			return base + (address & memmap[chunk].mask);
		}
	}
	return NULL;
}
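//Returns the size of the address range a chunk effectively occupies: its
//full start-end range when its mask covers the whole address space,
//otherwise the mirror size implied by the mask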
uint32_t chunk_size(cpu_options *opts, memmap_chunk const *chunk)
{
	if (chunk->mask == opts->address_mask) {
		return chunk->end - chunk->start;
	} else {
		return chunk->mask + 1;
	}
}
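//Sums the sizes of all chunks that may contain translated code (MMAP_CODE)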
uint32_t ram_size(cpu_options *opts)
{
	uint32_t size = 0;
	for (uint32_t i = 0; i < opts->memmap_chunks; i++)
	{
		if (opts->memmap[i].flags & MMAP_CODE) {
			size += chunk_size(opts, opts->memmap + i);
		}
	}
	return size;
}