44 |
|
#include "mon_disass.h" |
45 |
|
#endif |
46 |
|
|
47 |
< |
#define DEBUG 1 |
47 |
> |
#define DEBUG 0 |
48 |
|
#include "debug.h" |
49 |
|
|
50 |
|
static void enter_mon(void) |
57 |
|
} |
58 |
|
|
59 |
|
// Enable multicore (main/interrupts) cpu emulation? |
60 |
< |
#define MULTICORE_CPU 0 |
60 |
> |
#define MULTICORE_CPU (ASYNC_IRQ ? 1 : 0) |
61 |
|
|
62 |
|
// Enable Execute68k() safety checks? |
63 |
|
#define SAFE_EXEC_68K 1 |
89 |
|
|
90 |
|
public: |
91 |
|
|
92 |
< |
sheepshaver_cpu() |
93 |
< |
: powerpc_cpu() |
94 |
< |
{ init_decoder(); } |
92 |
> |
// Constructor |
93 |
> |
sheepshaver_cpu(); |
94 |
|
|
	// Condition Register accessors
	// Read the full 32-bit Condition Register value
	uint32 get_cr() const { return cr().get(); }
97 |
|
void set_cr(uint32 v) { cr().set(v); } |
98 |
|
|
99 |
|
// Execution loop |
100 |
< |
void execute(uint32 pc); |
100 |
> |
void execute(uint32 entry, bool enable_cache = false); |
101 |
|
|
102 |
|
// Execute 68k routine |
103 |
|
void execute_68k(uint32 entry, M68kRegisters *r); |
113 |
|
|
114 |
|
// Handle MacOS interrupt |
115 |
|
void interrupt(uint32 entry); |
116 |
+ |
void handle_interrupt(); |
117 |
|
|
118 |
|
// spcflags for interrupts handling |
119 |
|
static uint32 spcflags; |
131 |
|
uint32 sheepshaver_cpu::spcflags = 0; |
132 |
|
lazy_allocator< sheepshaver_cpu > allocator_helper< sheepshaver_cpu, lazy_allocator >::allocator; |
133 |
|
|
134 |
+ |
// Constructor: chain to the base PowerPC core, then build this
// instance's instruction decoder tables.
sheepshaver_cpu::sheepshaver_cpu()
	: powerpc_cpu()
{
	init_decoder();
}
139 |
+ |
|
140 |
|
void sheepshaver_cpu::init_decoder() |
141 |
|
{ |
142 |
|
#ifndef PPC_NO_STATIC_II_INDEX_TABLE |
187 |
|
case 0: // EMUL_RETURN |
188 |
|
QuitEmulator(); |
189 |
|
break; |
190 |
< |
|
190 |
> |
|
191 |
|
case 1: // EXEC_RETURN |
192 |
|
throw sheepshaver_exec_return(); |
193 |
|
break; |
222 |
|
} |
223 |
|
} |
224 |
|
|
219 |
– |
// Checks for pending interrupts |
220 |
– |
struct execute_nothing { |
221 |
– |
static inline void execute(powerpc_cpu *) { } |
222 |
– |
}; |
223 |
– |
|
224 |
– |
// Polling policy object: services pending spcflags requests from
// inside the execution loop (only when interrupts are synchronous).
struct execute_spcflags_check {
	static inline void execute(powerpc_cpu *cpu) {
#if !ASYNC_IRQ
		// Fast path: nothing to do unless some flag other than
		// EXEC_RETURN is raised
		if (SPCFLAGS_TEST(SPCFLAG_ALL_BUT_EXEC_RETURN)) {
			// Drop into the monitor if requested
			if (SPCFLAGS_TEST( SPCFLAG_ENTER_MON )) {
				SPCFLAGS_CLEAR( SPCFLAG_ENTER_MON );
				enter_mon();
			}
			// Service an interrupt armed on a previous pass
			if (SPCFLAGS_TEST( SPCFLAG_DOINT )) {
				SPCFLAGS_CLEAR( SPCFLAG_DOINT );
				HandleInterrupt();
			}
			// Newly raised interrupt: convert INT into DOINT so it is
			// serviced on the NEXT pass (one-iteration deferral —
			// check order here is deliberate, do not reorder)
			if (SPCFLAGS_TEST( SPCFLAG_INT )) {
				SPCFLAGS_CLEAR( SPCFLAG_INT );
				SPCFLAGS_SET( SPCFLAG_DOINT );
			}
		}
#endif
	}
};
244 |
– |
|
225 |
|
// Execution loop |
226 |
< |
void sheepshaver_cpu::execute(uint32 entry) |
226 |
> |
void sheepshaver_cpu::execute(uint32 entry, bool enable_cache) |
227 |
|
{ |
228 |
|
try { |
229 |
< |
pc() = entry; |
250 |
< |
powerpc_cpu::do_execute<execute_nothing, execute_spcflags_check>(); |
229 |
> |
powerpc_cpu::execute(entry, enable_cache); |
230 |
|
} |
231 |
|
catch (sheepshaver_exec_return const &) { |
232 |
|
// Nothing, simply return |
273 |
|
gpr(8) = 0; |
274 |
|
gpr(10) = (uint32)trampoline; |
275 |
|
gpr(12) = (uint32)trampoline; |
276 |
< |
gpr(13) = cr().get(); |
276 |
> |
gpr(13) = get_cr(); |
277 |
|
|
278 |
|
// rlwimi. r7,r7,8,0,0 |
279 |
|
uint32 result = op_ppc_rlwimi::apply(gpr(7), 8, 0x80000000, gpr(7)); |
281 |
|
gpr(7) = result; |
282 |
|
|
283 |
|
gpr(11) = 0xf072; // MSR (SRR1) |
284 |
< |
cr().set((gpr(11) & 0x0fff0000) | (cr().get() & ~0x0fff0000)); |
284 |
> |
cr().set((gpr(11) & 0x0fff0000) | (get_cr() & ~0x0fff0000)); |
285 |
|
|
286 |
|
// Enter nanokernel |
287 |
|
execute(entry); |
307 |
|
uint32 saved_pc = pc(); |
308 |
|
uint32 saved_lr = lr(); |
309 |
|
uint32 saved_ctr= ctr(); |
310 |
+ |
uint32 saved_cr = get_cr(); |
311 |
|
|
312 |
|
// Create MacOS stack frame |
313 |
|
// FIXME: make sure MacOS doesn't expect PPC registers to live on top |
379 |
|
pc() = saved_pc; |
380 |
|
lr() = saved_lr; |
381 |
|
ctr()= saved_ctr; |
382 |
+ |
set_cr(saved_cr); |
383 |
|
} |
384 |
|
|
385 |
|
// Call MacOS PPC code |
575 |
|
// Call MacOS PPC code
// Entry point into the emulation loop: selects the main CPU object and
// starts executing at `entry`.
void emul_ppc(uint32 entry)
{
	current_cpu = main_cpu;
#if DEBUG
	current_cpu->start_log();
#endif
	// Start the emulation loop with code translation/caching enabled
	current_cpu->execute(entry, true);
}
584 |
|
|
585 |
|
/* |
586 |
|
* Handle PowerPC interrupt |
587 |
|
*/ |
588 |
|
|
589 |
< |
// Atomic operations |
590 |
< |
extern int atomic_add(int *var, int v); |
591 |
< |
extern int atomic_and(int *var, int v); |
592 |
< |
extern int atomic_or(int *var, int v); |
593 |
< |
|
594 |
< |
#if !ASYNC_IRQ |
589 |
> |
#if ASYNC_IRQ |
590 |
> |
void HandleInterrupt(void) |
591 |
> |
{ |
592 |
> |
main_cpu->handle_interrupt(); |
593 |
> |
} |
594 |
> |
#else |
595 |
|
void TriggerInterrupt(void) |
596 |
|
{ |
597 |
|
#if 0 |
598 |
|
WriteMacInt32(0x16a, ReadMacInt32(0x16a) + 1); |
599 |
|
#else |
600 |
< |
SPCFLAGS_SET( SPCFLAG_INT ); |
600 |
> |
// Trigger interrupt to main cpu only |
601 |
> |
if (main_cpu) |
602 |
> |
main_cpu->trigger_interrupt(); |
603 |
|
#endif |
604 |
|
} |
605 |
|
#endif |
606 |
|
|
607 |
< |
void HandleInterrupt(void) |
607 |
> |
void sheepshaver_cpu::handle_interrupt(void) |
608 |
|
{ |
609 |
|
// Do nothing if interrupts are disabled |
610 |
|
if (int32(ReadMacInt32(XLM_IRQ_NEST)) > 0) |
623 |
|
// 68k emulator active, trigger 68k interrupt level 1 |
624 |
|
assert(current_cpu == main_cpu); |
625 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
626 |
< |
main_cpu->set_cr(main_cpu->get_cr() | tswap32(kernel_data->v[0x674 >> 2])); |
626 |
> |
set_cr(get_cr() | tswap32(kernel_data->v[0x674 >> 2])); |
627 |
|
break; |
628 |
|
|
629 |
|
#if INTERRUPTS_IN_NATIVE_MODE |
630 |
|
case MODE_NATIVE: |
631 |
|
// 68k emulator inactive, in nanokernel? |
632 |
|
assert(current_cpu == main_cpu); |
633 |
< |
if (main_cpu->gpr(1) != KernelDataAddr) { |
633 |
> |
if (gpr(1) != KernelDataAddr) { |
634 |
|
// Prepare for 68k interrupt level 1 |
635 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
636 |
|
WriteMacInt32(tswap32(kernel_data->v[0x658 >> 2]) + 0xdc, |
886 |
|
} |
887 |
|
|
888 |
|
/*
 *  Atomic operations
 */

// Fetch-and-add: returns the previous value of *var, then adds v.
// NOTE(review): plain read-modify-write, NOT actually atomic —
// presumably relies on the caller excluding concurrent access; verify.
int atomic_add(int *var, int v)
{
	const int previous = *var;
	*var = previous + v;
	return previous;
}
912 |
– |
|
913 |
– |
// Fetch-and-AND: returns the previous value of *var, then ANDs in v.
// NOTE(review): plain read-modify-write, NOT actually atomic —
// presumably relies on the caller excluding concurrent access; verify.
int atomic_and(int *var, int v)
{
	const int previous = *var;
	*var = previous & v;
	return previous;
}
919 |
– |
|
920 |
– |
// Fetch-and-OR: returns the previous value of *var, then ORs in v.
// NOTE(review): plain read-modify-write, NOT actually atomic —
// presumably relies on the caller excluding concurrent access; verify.
int atomic_or(int *var, int v)
{
	const int previous = *var;
	*var = previous | v;
	return previous;
}
926 |
– |
|
927 |
– |
/* |
889 |
|
* Resource Manager thunks |
890 |
|
*/ |
891 |
|
|