44 |
|
#include "mon_disass.h" |
45 |
|
#endif |
46 |
|
|
47 |
< |
#define DEBUG 1 |
47 |
> |
#define DEBUG 0 |
48 |
|
#include "debug.h" |
49 |
|
|
50 |
|
static void enter_mon(void) |
57 |
|
} |
58 |
|
|
59 |
|
// Enable multicore (main/interrupts) cpu emulation? |
60 |
< |
#define MULTICORE_CPU 0 |
60 |
> |
#define MULTICORE_CPU (ASYNC_IRQ ? 1 : 0) |
61 |
|
|
62 |
|
// Enable Execute68k() safety checks? |
63 |
|
#define SAFE_EXEC_68K 1 |
89 |
|
|
90 |
|
public: |
91 |
|
|
92 |
< |
sheepshaver_cpu() |
93 |
< |
: powerpc_cpu() |
94 |
< |
{ init_decoder(); } |
92 |
> |
// Constructor |
93 |
> |
sheepshaver_cpu(); |
94 |
|
|
95 |
|
// Condition Register accessors |
96 |
|
uint32 get_cr() const { return cr().get(); } |
97 |
|
void set_cr(uint32 v) { cr().set(v); } |
98 |
|
|
99 |
|
// Execution loop |
100 |
< |
void execute(uint32 pc); |
100 |
> |
void execute(uint32 entry, bool enable_cache = false); |
101 |
|
|
102 |
|
// Execute 68k routine |
103 |
|
void execute_68k(uint32 entry, M68kRegisters *r); |
113 |
|
|
114 |
|
// Handle MacOS interrupt |
115 |
|
void interrupt(uint32 entry); |
116 |
+ |
void handle_interrupt(); |
117 |
|
|
118 |
|
// spcflags for interrupts handling |
119 |
|
static uint32 spcflags; |
131 |
|
uint32 sheepshaver_cpu::spcflags = 0; |
132 |
|
lazy_allocator< sheepshaver_cpu > allocator_helper< sheepshaver_cpu, lazy_allocator >::allocator; |
133 |
|
|
134 |
+ |
sheepshaver_cpu::sheepshaver_cpu() |
135 |
+ |
: powerpc_cpu() |
136 |
+ |
{ |
137 |
+ |
init_decoder(); |
138 |
+ |
} |
139 |
+ |
|
140 |
|
void sheepshaver_cpu::init_decoder() |
141 |
|
{ |
142 |
|
#ifndef PPC_NO_STATIC_II_INDEX_TABLE |
222 |
|
} |
223 |
|
} |
224 |
|
|
219 |
- |
// Checks for pending interrupts |
220 |
- |
struct execute_nothing { |
221 |
- |
static inline void execute(powerpc_cpu *) { } |
222 |
- |
}; |
223 |
- |
|
224 |
- |
struct execute_spcflags_check { |
225 |
- |
static inline void execute(powerpc_cpu *cpu) { |
226 |
- |
#if !ASYNC_IRQ |
227 |
- |
if (SPCFLAGS_TEST(SPCFLAG_ALL_BUT_EXEC_RETURN)) { |
228 |
- |
if (SPCFLAGS_TEST( SPCFLAG_ENTER_MON )) { |
229 |
- |
SPCFLAGS_CLEAR( SPCFLAG_ENTER_MON ); |
230 |
- |
enter_mon(); |
231 |
- |
} |
232 |
- |
if (SPCFLAGS_TEST( SPCFLAG_DOINT )) { |
233 |
- |
SPCFLAGS_CLEAR( SPCFLAG_DOINT ); |
234 |
- |
HandleInterrupt(); |
235 |
- |
} |
236 |
- |
if (SPCFLAGS_TEST( SPCFLAG_INT )) { |
237 |
- |
SPCFLAGS_CLEAR( SPCFLAG_INT ); |
238 |
- |
SPCFLAGS_SET( SPCFLAG_DOINT ); |
239 |
- |
} |
240 |
- |
} |
241 |
- |
#endif |
242 |
- |
} |
243 |
- |
}; |
244 |
- |
|
225 |
|
// Execution loop |
226 |
< |
void sheepshaver_cpu::execute(uint32 entry) |
226 |
> |
void sheepshaver_cpu::execute(uint32 entry, bool enable_cache) |
227 |
|
{ |
228 |
|
try { |
229 |
< |
pc() = entry; |
250 |
< |
powerpc_cpu::do_execute<execute_nothing, execute_spcflags_check>(); |
229 |
> |
powerpc_cpu::execute(entry, enable_cache); |
230 |
|
} |
231 |
|
catch (sheepshaver_exec_return const &) { |
232 |
|
// Nothing, simply return |
575 |
|
void emul_ppc(uint32 entry) |
576 |
|
{ |
577 |
|
current_cpu = main_cpu; |
578 |
+ |
#if DEBUG |
579 |
|
current_cpu->start_log(); |
580 |
< |
current_cpu->execute(entry); |
580 |
> |
#endif |
581 |
> |
// start emulation loop and enable code translation or caching |
582 |
> |
current_cpu->execute(entry, true); |
583 |
|
} |
584 |
|
|
585 |
|
/* |
586 |
|
* Handle PowerPC interrupt |
587 |
|
*/ |
588 |
|
|
607 |
- |
// Atomic operations |
608 |
- |
extern int atomic_add(int *var, int v); |
609 |
- |
extern int atomic_and(int *var, int v); |
610 |
- |
extern int atomic_or(int *var, int v); |
611 |
- |
|
589 |
|
#if !ASYNC_IRQ |
590 |
|
void TriggerInterrupt(void) |
591 |
|
{ |
592 |
|
#if 0 |
593 |
|
WriteMacInt32(0x16a, ReadMacInt32(0x16a) + 1); |
594 |
|
#else |
595 |
< |
SPCFLAGS_SET( SPCFLAG_INT ); |
595 |
> |
// Trigger interrupt to main cpu only |
596 |
> |
if (main_cpu) |
597 |
> |
main_cpu->trigger_interrupt(); |
598 |
|
#endif |
599 |
|
} |
600 |
|
#endif |
601 |
|
|
602 |
< |
void HandleInterrupt(void) |
602 |
> |
void sheepshaver_cpu::handle_interrupt(void) |
603 |
|
{ |
604 |
|
// Do nothing if interrupts are disabled |
605 |
|
if (int32(ReadMacInt32(XLM_IRQ_NEST)) > 0) |
618 |
|
// 68k emulator active, trigger 68k interrupt level 1 |
619 |
|
assert(current_cpu == main_cpu); |
620 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
621 |
< |
main_cpu->set_cr(main_cpu->get_cr() | tswap32(kernel_data->v[0x674 >> 2])); |
621 |
> |
set_cr(get_cr() | tswap32(kernel_data->v[0x674 >> 2])); |
622 |
|
break; |
623 |
|
|
624 |
|
#if INTERRUPTS_IN_NATIVE_MODE |
625 |
|
case MODE_NATIVE: |
626 |
|
// 68k emulator inactive, in nanokernel? |
627 |
|
assert(current_cpu == main_cpu); |
628 |
< |
if (main_cpu->gpr(1) != KernelDataAddr) { |
628 |
> |
if (gpr(1) != KernelDataAddr) { |
629 |
|
// Prepare for 68k interrupt level 1 |
630 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
631 |
|
WriteMacInt32(tswap32(kernel_data->v[0x658 >> 2]) + 0xdc, |
881 |
|
} |
882 |
|
|
883 |
|
/* |
905 |
- |
* Atomic operations |
906 |
- |
*/ |
907 |
- |
|
908 |
- |
int atomic_add(int *var, int v) |
909 |
- |
{ |
910 |
- |
int ret = *var; |
911 |
- |
*var += v; |
912 |
- |
return ret; |
913 |
- |
} |
914 |
- |
|
915 |
- |
int atomic_and(int *var, int v) |
916 |
- |
{ |
917 |
- |
int ret = *var; |
918 |
- |
*var &= v; |
919 |
- |
return ret; |
920 |
- |
} |
921 |
- |
|
922 |
- |
int atomic_or(int *var, int v) |
923 |
- |
{ |
924 |
- |
int ret = *var; |
925 |
- |
*var |= v; |
926 |
- |
return ret; |
927 |
- |
} |
928 |
- |
|
929 |
- |
/* |
884 |
|
* Resource Manager thunks |
885 |
|
*/ |
886 |
|
|