   1    /*
   2    * sheepshaver_glue.cpp - Glue Kheperix CPU to SheepShaver CPU engine interface
   3    *
   4 <  * SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
   4 >  * SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
   5    *
   6    * This program is free software; you can redistribute it and/or modify
   7    * it under the terms of the GNU General Public License as published by
  42    
  43    #include <stdio.h>
  44    #include <stdlib.h>
  45 +  #ifdef HAVE_MALLOC_H
  46 +  #include <malloc.h>
  47 +  #endif
  48    
  49    #ifdef USE_SDL_VIDEO
  50    #include <SDL_events.h>
  93    // PowerPC EmulOp to exit from emulation loop
  94    const uint32 POWERPC_EXEC_RETURN = POWERPC_EMUL_OP | 1;
  95    
  93 -  // Enable interrupt routine safety checks?
  94 -  #define SAFE_INTERRUPT_PPC 1
  95 -  
  96    // Enable Execute68k() safety checks?
  97    #define SAFE_EXEC_68K 1
  98    
 105    // Interrupts in native mode?
 106    #define INTERRUPTS_IN_NATIVE_MODE 1
 107    
 108 -  // Enable native EMUL_OPs to be run without a mode switch
 109 -  #define ENABLE_NATIVE_EMUL_OP 1
 110 -  
 108    // Pointer to Kernel Data
 109 <  static KernelData * const kernel_data = (KernelData *)KERNEL_DATA_BASE;
 109 >  static KernelData * kernel_data;
 110    
 111    // SIGSEGV handler
 112    sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t);
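kernel_data is no longer bound to the fixed Mac address KERNEL_DATA_BASE at static-initialization time; it is now resolved at startup through Mac2HostAddr() (see the init_emul_ppc hunk further down), because Mac addresses and host addresses are no longer assumed to be identical. A minimal sketch of that kind of base-offset translation, with hypothetical names (guest_ram, to_host, to_guest are illustrations, not the SheepShaver API):

#include <cstdint>
#include <cassert>

// Hypothetical guest-RAM layout: guest addresses [guest_base, guest_base + size)
// are backed by the host buffer starting at host_base.
struct guest_ram {
    uint32_t guest_base;   // base of the region in Mac address space
    uint8_t *host_base;    // host buffer backing that region
    uint32_t size;

    // Translate a Mac address into a host pointer (Mac2HostAddr-style).
    uint8_t *to_host(uint32_t mac_addr) const {
        assert(mac_addr - guest_base < size);       // must fall inside the region
        return host_base + (mac_addr - guest_base);
    }

    // Translate a host pointer back into a Mac address (Host2MacAddr-style).
    uint32_t to_guest(const uint8_t *p) const {
        return guest_base + uint32_t(p - host_base);
    }
};

With such a mapping in place, a compile-time cast of a guest constant to a host pointer cannot work; the pointer has to be computed once the guest region has actually been mapped, which is why the assignment moves into init_emul_ppc().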
 139        void init_decoder();
 140        void execute_sheep(uint32 opcode);
 141    
 145 -      // Filter out EMUL_OP routines that only call native code
 146 -      bool filter_execute_emul_op(uint32 emul_op);
 147 -  
 148 -      // "Native" EMUL_OP routines
 149 -      void execute_emul_op_microseconds();
 150 -      void execute_emul_op_idle_time_1();
 151 -      void execute_emul_op_idle_time_2();
 152 -  
 153 -      // CPU context to preserve on interrupt
 154 -      class interrupt_context {
 155 -          uint32 gpr[32];
 156 -          uint32 pc;
 157 -          uint32 lr;
 158 -          uint32 ctr;
 159 -          uint32 cr;
 160 -          uint32 xer;
 161 -          sheepshaver_cpu *cpu;
 162 -          const char *where;
 163 -      public:
 164 -          interrupt_context(sheepshaver_cpu *_cpu, const char *_where);
 165 -          ~interrupt_context();
 166 -      };
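The interrupt_context class removed here (its out-of-line implementation is dropped further down as well) is an RAII guard: its constructor snapshots the CPU registers before an interrupt is serviced and its destructor compares them afterwards, complaining if the interrupt clobbered anything. A stripped-down, generic sketch of that snapshot-and-verify pattern, with hypothetical names (reg_file, state_check are not SheepShaver types):

#include <cstdio>
#include <cstring>

// Hypothetical register file; stands in for the emulated CPU state.
struct reg_file {
    unsigned gpr[32];
    unsigned pc, lr, ctr, cr, xer;
};

// Scope guard: copy the state in the constructor, verify it in the destructor.
class state_check {
    reg_file snapshot;
    const reg_file *live;
    const char *where;
public:
    state_check(const reg_file *r, const char *w) : snapshot(*r), live(r), where(w) {}
    ~state_check() {
        if (memcmp(&snapshot, live, sizeof(reg_file)) != 0)
            fprintf(stderr, "FATAL: %s: state clobbered across scope\n", where);
    }
};

void service_interrupt(reg_file *r)
{
    state_check guard(r, "interrupt");   // verified automatically on every return path
    // ... interrupt handling that must preserve *r ...
}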
 167 -  
 142    public:
 143    
 144        // Constructor
 165        // Execute MacOS/PPC code
 166        uint32 execute_macos_code(uint32 tvect, int nargs, uint32 const *args);
 167    
 168 +  #if PPC_ENABLE_JIT
 169        // Compile one instruction
 170        virtual int compile1(codegen_context_t & cg_context);
 171 <  
 171 >  #endif
 172        // Resource manager thunk
 173        void get_resource(uint32 old_get_resource);
 174    
 175        // Handle MacOS interrupt
 176        void interrupt(uint32 entry);
 202 -      void handle_interrupt();
 177    
 178        // Make sure the SIGSEGV handler can access CPU registers
 179        friend sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t);
 180    };
 181    
 208 -  // Memory allocator returning areas aligned on 16-byte boundaries
 209 -  void *operator new(size_t size)
 210 -  {
 211 -      void *p;
 212 -  
 213 -  #if defined(HAVE_POSIX_MEMALIGN)
 214 -      if (posix_memalign(&p, 16, size) != 0)
 215 -          throw std::bad_alloc();
 216 -  #elif defined(HAVE_MEMALIGN)
 217 -      p = memalign(16, size);
 218 -  #elif defined(HAVE_VALLOC)
 219 -      p = valloc(size); // page-aligned!
 220 -  #else
 221 -      /* XXX: handle padding ourselves */
 222 -      p = malloc(size);
 223 -  #endif
 224 -  
 225 -      return p;
 226 -  }
 227 -  
 228 -  void operator delete(void *p)
 229 -  {
 230 -  #if defined(HAVE_MEMALIGN) || defined(HAVE_VALLOC)
 231 -  #if defined(__GLIBC__)
 232 -      // this is known to work only with GNU libc
 233 -      free(p);
 234 -  #endif
 235 -  #else
 236 -      free(p);
 237 -  #endif
 238 -  }
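The removed overloads forced every dynamically allocated object onto a 16-byte boundary, picking whichever allocator the configure script found (posix_memalign, memalign, valloc or plain malloc). A condensed sketch of the same idea using only posix_memalign, assuming a POSIX host; this illustrates the removed technique, it is not what the new revision does:

#include <cstddef>
#include <cstdlib>
#include <new>

// Global operator new returning 16-byte aligned blocks.
void *operator new(std::size_t size)
{
    void *p = 0;
    if (posix_memalign(&p, 16, size ? size : 1) != 0)
        throw std::bad_alloc();
    return p;
}

// Memory obtained from posix_memalign is released with plain free().
void operator delete(void *p) throw()
{
    free(p);
}

C++17 later added aligned forms of operator new and std::aligned_alloc, which make this kind of global overload unnecessary for over-aligned types.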
 239 -  
 182    sheepshaver_cpu::sheepshaver_cpu()
 183        : powerpc_cpu(enable_jit_p())
 184    {
 216    typedef bit_field< 20, 25 > NATIVE_OP_field;
 217    typedef bit_field< 26, 31 > EMUL_OP_field;
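bit_field< 20, 25 > and bit_field< 26, 31 > carve fields out of the 32-bit SHEEP opcode using PowerPC's big-endian bit numbering, where bit 0 is the most significant bit. A self-contained sketch of what such an extractor boils down to (a simplified stand-in, not the actual Kheperix template):

#include <cstdint>

// Extract bits FIRST..LAST (inclusive) of a 32-bit word, numbered from the
// most significant bit (PowerPC convention: bit 0 = MSB, bit 31 = LSB).
template <int FIRST, int LAST>
struct be_bit_field {
    static uint32_t shift() { return 31 - LAST; }
    static uint32_t mask()  {
        return (LAST - FIRST == 31) ? 0xffffffffu
                                    : ((1u << (LAST - FIRST + 1)) - 1);
    }
    static uint32_t extract(uint32_t x) { return (x >> shift()) & mask(); }
};

typedef be_bit_field<26, 31> emul_op_field;   // low 6 bits of the opcode
// emul_op_field::extract(0x00000015) == 0x15

EMUL_OP_field::extract(opcode) is therefore simply the low six bits of the instruction word, which is what the "- 3" adjustment in compile1() further down operates on.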
 218    
 277 -  // "Native" EMUL_OP routines
 278 -  #define GPR_A(REG) gpr(16 + (REG))
 279 -  #define GPR_D(REG) gpr( 8 + (REG))
 280 -  
 281 -  void sheepshaver_cpu::execute_emul_op_microseconds()
 282 -  {
 283 -      Microseconds(GPR_A(0), GPR_D(0));
 284 -  }
 285 -  
 286 -  void sheepshaver_cpu::execute_emul_op_idle_time_1()
 287 -  {
 288 -      // Sleep if no events pending
 289 -      if (ReadMacInt32(0x14c) == 0)
 290 -          Delay_usec(16667);
 291 -      GPR_A(0) = ReadMacInt32(0x2b6);
 292 -  }
 293 -  
 294 -  void sheepshaver_cpu::execute_emul_op_idle_time_2()
 295 -  {
 296 -      // Sleep if no events pending
 297 -      if (ReadMacInt32(0x14c) == 0)
 298 -          Delay_usec(16667);
 299 -      GPR_D(0) = (uint32)-2;
 300 -  }
 301 -  
 302 -  // Filter out EMUL_OP routines that only call native code
 303 -  bool sheepshaver_cpu::filter_execute_emul_op(uint32 emul_op)
 304 -  {
 305 -      switch (emul_op) {
 306 -      case OP_MICROSECONDS:
 307 -          execute_emul_op_microseconds();
 308 -          return true;
 309 -      case OP_IDLE_TIME:
 310 -          execute_emul_op_idle_time_1();
 311 -          return true;
 312 -      case OP_IDLE_TIME_2:
 313 -          execute_emul_op_idle_time_2();
 314 -          return true;
 315 -      }
 316 -      return false;
 317 -  }
 318 -  
 219    // Execute EMUL_OP routine
 220    void sheepshaver_cpu::execute_emul_op(uint32 emul_op)
 221    {
 322 -  #if ENABLE_NATIVE_EMUL_OP
 323 -      // First, filter out EMUL_OPs that can be executed without a mode switch
 324 -      if (filter_execute_emul_op(emul_op))
 325 -          return;
 326 -  #endif
 327 -  
 222        M68kRegisters r68;
 223        WriteMacInt32(XLM_68K_R25, gpr(25));
 224        WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP);
 227        for (int i = 0; i < 7; i++)
 228            r68.a[i] = gpr(16 + i);
 229        r68.a[7] = gpr(1);
 230 <      uint32 saved_cr = get_cr() & CR_field<2>::mask();
 230 >      uint32 saved_cr = get_cr() & 0xff9fffff;  // mask_operand::compute(11, 8)
 231        uint32 saved_xer = get_xer();
 232        EmulOp(&r68, gpr(24), emul_op);
 233        set_cr(saved_cr);
 271    }
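The replacement line trades the CR_field<2>::mask() helper for the literal 0xff9fffff, annotated as mask_operand::compute(11, 8). That is the PowerPC rlwinm-style mask from big-endian bit 11 through bit 8; because the start bit is greater than the end bit, the mask wraps around the word and covers every bit except bits 9 and 10. A small sketch of how such a wrapping mask is computed (generic helper, not the Kheperix mask_operand class):

#include <cstdint>
#include <cassert>

// PowerPC mask from big-endian bit mb to bit me, inclusive; wraps when mb > me,
// exactly like the MB/ME operands of rlwinm.
static inline uint32_t ppc_mask(int mb, int me)
{
    uint32_t from_mb = 0xffffffffu >> mb;           // bits mb..31 set
    uint32_t to_me   = 0xffffffffu << (31 - me);    // bits 0..me set
    return (mb <= me) ? (from_mb & to_me) : (from_mb | to_me);
}

int main()
{
    assert(ppc_mask(11, 8)  == 0xff9fffffu);   // the literal used above
    assert(ppc_mask(26, 31) == 0x0000003fu);   // EMUL_OP_field mask, for comparison
    return 0;
}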
 272    
 273    // Compile one instruction
 274 +  #if PPC_ENABLE_JIT
 275    int sheepshaver_cpu::compile1(codegen_context_t & cg_context)
 276    {
 382 -  #if PPC_ENABLE_JIT
 277        const instr_info_t *ii = cg_context.instr_info;
 278        if (ii->mnemo != PPC_I(SHEEP))
 279            return COMPILE_FAILURE;
 400    
 401        default: {  // EMUL_OP
 402            uint32 emul_op = EMUL_OP_field::extract(opcode) - 3;
 509 -  #if ENABLE_NATIVE_EMUL_OP
 510 -          typedef void (*emul_op_func_t)(dyngen_cpu_base);
 511 -          emul_op_func_t emul_op_func = 0;
 512 -          switch (emul_op) {
 513 -          case OP_MICROSECONDS:
 514 -              emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_microseconds).ptr();
 515 -              break;
 516 -          case OP_IDLE_TIME:
 517 -              emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_1).ptr();
 518 -              break;
 519 -          case OP_IDLE_TIME_2:
 520 -              emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_2).ptr();
 521 -              break;
 522 -          }
 523 -          if (emul_op_func) {
 524 -              dg.gen_invoke_CPU(emul_op_func);
 525 -              cg_context.done_compile = false;
 526 -              status = COMPILE_CODE_OK;
 527 -              break;
 528 -          }
 529 -  #endif
 403    #if PPC_REENTRANT_JIT
 404            // Try to execute EmulOp trampoline
 405            dg.gen_set_PC_im(cg_context.pc + 4);
 419        }
 420        }
 421        return status;
 549 -  #endif
 550 -      return COMPILE_FAILURE;
 422    }
 552 -  
 553 -  // CPU context to preserve on interrupt
 554 -  sheepshaver_cpu::interrupt_context::interrupt_context(sheepshaver_cpu *_cpu, const char *_where)
 555 -  {
 556 -  #if SAFE_INTERRUPT_PPC >= 2
 557 -      cpu = _cpu;
 558 -      where = _where;
 559 -  
 560 -      // Save interrupt context
 561 -      memcpy(&gpr[0], &cpu->gpr(0), sizeof(gpr));
 562 -      pc = cpu->pc();
 563 -      lr = cpu->lr();
 564 -      ctr = cpu->ctr();
 565 -      cr = cpu->get_cr();
 566 -      xer = cpu->get_xer();
 423    #endif
 568 -  }
 569 -  
 570 -  sheepshaver_cpu::interrupt_context::~interrupt_context()
 571 -  {
 572 -  #if SAFE_INTERRUPT_PPC >= 2
 573 -      // Check whether CPU context was preserved by interrupt
 574 -      if (memcmp(&gpr[0], &cpu->gpr(0), sizeof(gpr)) != 0) {
 575 -          printf("FATAL: %s: interrupt clobbers registers\n", where);
 576 -          for (int i = 0; i < 32; i++)
 577 -              if (gpr[i] != cpu->gpr(i))
 578 -                  printf(" r%d: %08x -> %08x\n", i, gpr[i], cpu->gpr(i));
 579 -      }
 580 -      if (pc != cpu->pc())
 581 -          printf("FATAL: %s: interrupt clobbers PC\n", where);
 582 -      if (lr != cpu->lr())
 583 -          printf("FATAL: %s: interrupt clobbers LR\n", where);
 584 -      if (ctr != cpu->ctr())
 585 -          printf("FATAL: %s: interrupt clobbers CTR\n", where);
 586 -      if (cr != cpu->get_cr())
 587 -          printf("FATAL: %s: interrupt clobbers CR\n", where);
 588 -      if (xer != cpu->get_xer())
 589 -          printf("FATAL: %s: interrupt clobbers XER\n", where);
 590 -  #endif
 591 -  }
 424    
 425    // Handle MacOS interrupt
 426    void sheepshaver_cpu::interrupt(uint32 entry)
 430        const clock_t interrupt_start = clock();
 431    #endif
 432    
 601 -  #if SAFE_INTERRUPT_PPC
 602 -      static int depth = 0;
 603 -      if (depth != 0)
 604 -          printf("FATAL: sheepshaver_cpu::interrupt() called more than once: %d\n", depth);
 605 -      depth++;
 606 -  #endif
 607 -  
 433        // Save program counters and branch registers
 434        uint32 saved_pc = pc();
 435        uint32 saved_lr = lr();
 483    #if EMUL_TIME_STATS
 484        interrupt_time += (clock() - interrupt_start);
 485    #endif
 661 -  
 662 -  #if SAFE_INTERRUPT_PPC
 663 -      depth--;
 664 -  #endif
 486    }
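The dropped SAFE_INTERRUPT_PPC block protected interrupt() with a static depth counter so that unexpected re-entry was reported rather than silently corrupting state. If such a check is ever reintroduced, a scope-based guard avoids having to pair the ++/-- manually on every exit path; a minimal sketch (generic illustration, single-threaded like the original counter, not code from the new revision):

#include <cstdio>

// Reports re-entry into a code path that is supposed to be non-reentrant.
// The counter is decremented automatically on every return path.
class reentry_guard {
    int &depth;
public:
    reentry_guard(int &d, const char *what) : depth(d) {
        if (depth != 0)
            fprintf(stderr, "FATAL: %s re-entered (depth %d)\n", what, depth);
        ++depth;
    }
    ~reentry_guard() { --depth; }
};

void handle_interrupt_example()
{
    static int depth = 0;
    reentry_guard guard(depth, "handle_interrupt_example");
    // ... interrupt work; early returns and exceptions still decrement depth ...
}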
 487    
 488    // Execute 68k routine
 713        const uintptr addr = (uintptr)fault_address;
 714    #if HAVE_SIGSEGV_SKIP_INSTRUCTION
 715        // Ignore writes to ROM
 716 <      if ((addr - ROM_BASE) < ROM_SIZE)
 716 >      if ((addr - (uintptr)ROMBaseHost) < ROM_SIZE)
 717            return SIGSEGV_RETURN_SKIP_INSTRUCTION;
 718    
 719        // Get program counter of target CPU
 760    #error "FIXME: You don't have the capability to skip instruction within signal handlers"
 761    #endif
 762    
 763 <      printf("SIGSEGV\n");
 764 <      printf(" pc %p\n", fault_instruction);
 765 <      printf(" ea %p\n", fault_address);
 763 >      fprintf(stderr, "SIGSEGV\n");
 764 >      fprintf(stderr, " pc %p\n", fault_instruction);
 765 >      fprintf(stderr, " ea %p\n", fault_address);
 766        dump_registers();
 767        ppc_cpu->dump_log();
 768        enter_mon();
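Both the old and the new ROM test rely on the same idiom: with unsigned arithmetic, (addr - base) < size is a single comparison that is true exactly when base <= addr < base + size, because an address below base wraps around to a huge value. Only the base changes here, from the Mac-side ROM_BASE constant to the host-side ROMBaseHost pointer, matching the move to real host addresses inside the SIGSEGV handler. A tiny self-contained illustration of the idiom (the ROM placement is made up):

#include <cassert>
#include <cstdint>

// True iff addr lies in [base, base + size), using one unsigned compare.
static inline bool in_range(uintptr_t addr, uintptr_t base, uintptr_t size)
{
    return (addr - base) < size;   // wraps to a huge value when addr < base
}

int main()
{
    const uintptr_t base = 0x40000000, size = 0x400000;   // made-up ROM placement
    assert( in_range(base, base, size));
    assert( in_range(base + size - 1, base, size));
    assert(!in_range(base + size, base, size));
    assert(!in_range(base - 1, base, size));               // wraps around, stays false
    return 0;
}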
 773    
 774    void init_emul_ppc(void)
 775    {
 776 +      // Get pointer to KernelData in host address space
 777 +      kernel_data = (KernelData *)Mac2HostAddr(KERNEL_DATA_BASE);
 778 +  
 779        // Initialize main CPU emulator
 780        ppc_cpu = new sheepshaver_cpu();
 781        ppc_cpu->set_register(powerpc_registers::GPR(3), any_register((uint32)ROM_BASE + 0x30d000));
 826    #endif
 827    
 828        delete ppc_cpu;
 829 +      ppc_cpu = NULL;
 830    }
 831    
 832    #if PPC_ENABLE_JIT && PPC_REENTRANT_JIT
 874    
 875    void TriggerInterrupt(void)
 876    {
 877 +      idle_resume();
 878    #if 0
 879        WriteMacInt32(0x16a, ReadMacInt32(0x16a) + 1);
 880    #else
 884    #endif
 885    }
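idle_resume() is called before the interrupt flag is raised so that a CPU thread parked in an idle wait wakes up and notices the pending interrupt; the matching wait presumably lives in the platform main loop rather than in this file. A generic sketch of such a wait/resume pair built on a C++11 condition variable (hypothetical names and a plain bool flag, not the actual SheepShaver implementation):

#include <condition_variable>
#include <mutex>

// Generic idle wait/resume pair: the CPU thread sleeps in idle_wait() when it
// has nothing to do, and any thread posting work calls idle_resume() to wake it.
namespace idle_example {

std::mutex              lock;
std::condition_variable wakeup;
bool                    pending = false;

void idle_wait()                       // called by the emulator thread
{
    std::unique_lock<std::mutex> l(lock);
    wakeup.wait(l, [] { return pending; });
    pending = false;
}

void idle_resume()                     // called by whoever raises an interrupt
{
    std::lock_guard<std::mutex> l(lock);
    pending = true;
    wakeup.notify_one();
}

} // namespace idle_example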
 886    
 887 <  void sheepshaver_cpu::handle_interrupt(void)
 887 >  void HandleInterrupt(powerpc_registers *r)
 888    {
 889    #ifdef USE_SDL_VIDEO
 890        // We must fill in the events queue in the same thread that did call SDL_SetVideoMode()
 895        if (int32(ReadMacInt32(XLM_IRQ_NEST)) > 0)
 896            return;
 897    
 898 <      // Current interrupt nest level
1073 <      static int interrupt_depth = 0;
1074 <      ++interrupt_depth;
 898 >      // Update interrupt count
 899    #if EMUL_TIME_STATS
 900        interrupt_count++;
 901    #endif
 902    
1079 -      // Disable MacOS stack sniffer
1080 -      WriteMacInt32(0x110, 0);
1081 -  
 903        // Interrupt action depends on current run mode
 904        switch (ReadMacInt32(XLM_RUN_MODE)) {
 905        case MODE_68K:
 906            // 68k emulator active, trigger 68k interrupt level 1
 907            WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1);
 908 <          set_cr(get_cr() | tswap32(kernel_data->v[0x674 >> 2]));
 908 >          r->cr.set(r->cr.get() | tswap32(kernel_data->v[0x674 >> 2]));
 909            break;
 910    
 911    #if INTERRUPTS_IN_NATIVE_MODE
 912        case MODE_NATIVE:
 913            // 68k emulator inactive, in nanokernel?
 914 <          if (gpr(1) != KernelDataAddr && interrupt_depth == 1) {
1094 <              interrupt_context ctx(this, "PowerPC mode");
 914 >          if (r->gpr[1] != KernelDataAddr) {
 915    
 916                // Prepare for 68k interrupt level 1
 917                WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1);
 933        case MODE_EMUL_OP:
 934            // 68k emulator active, within EMUL_OP routine, execute 68k interrupt routine directly when interrupt level is 0
 935            if ((ReadMacInt32(XLM_68K_R25) & 7) == 0) {
1116 -              interrupt_context ctx(this, "68k mode");
 936    #if EMUL_TIME_STATS
 937                const clock_t interrupt_start = clock();
 938    #endif
 941                M68kRegisters r;
 942                uint32 old_r25 = ReadMacInt32(XLM_68K_R25);   // Save interrupt level
 943                WriteMacInt32(XLM_68K_R25, 0x21);             // Execute with interrupt level 1
 944 <              static const uint8 proc[] = {
 944 >              static const uint8 proc_template[] = {
 945                    0x3f, 0x3c, 0x00, 0x00,          // move.w  #$0000,-(sp)  (fake format word)
 946                    0x48, 0x7a, 0x00, 0x0a,          // pea     @1(pc)        (return address)
 947                    0x40, 0xe7,                      // move    sr,-(sp)      (saved SR)
 949                    0x4e, 0xd0,                      // jmp     (a0)
 950                    M68K_RTS >> 8, M68K_RTS & 0xff   // @1
 951                };
 952 <              Execute68k((uint32)proc, &r);
 952 >              BUILD_SHEEPSHAVER_PROCEDURE(proc);
 953 >              Execute68k(proc, &r);
 954                WriteMacInt32(XLM_68K_R25, old_r25);          // Restore interrupt level
 955    #else
 956                // Only update cursor
 969                break;
 970    #endif
 971            }
1152 -  
1153 -      // We are done with this interrupt
1154 -      --interrupt_depth;
 972    }
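The 68k trampoline used to be a host-side static array whose host address was simply cast to uint32 for Execute68k(). With Mac and host address spaces now kept separate, the bytes are declared as proc_template, and BUILD_SHEEPSHAVER_PROCEDURE(proc) presumably installs a copy at a Mac-visible address before Execute68k(proc, &r) runs it. A rough sketch of what installing such a procedure involves, with hypothetical globals (ram_host, scratch_mac_addr) standing in for the real memory layout; this is not the macro's actual definition:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Assumptions for illustration: Mac address 0 is backed by ram_host[0], and
// scratch_mac_addr points at a small Mac-visible scratch buffer we may overwrite.
extern uint8_t *ram_host;
extern uint32_t scratch_mac_addr;

// Copy a 68k code template into guest memory and return the Mac address that
// can be handed to an Execute68k-style call.
static uint32_t install_68k_proc(const uint8_t *tmpl, std::size_t len)
{
    std::memcpy(ram_host + scratch_mac_addr, tmpl, len);  // write through the host view
    return scratch_mac_addr;                              // the guest runs it from here
}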
 973    
 974    static void get_resource(void);
 996            VideoVBL();
 997            break;
 998        case NATIVE_VIDEO_DO_DRIVER_IO:
 999 <          gpr(3) = (int32)(int16)VideoDoDriverIO((void *)gpr(3), (void *)gpr(4),
1000 <                                                 (void *)gpr(5), gpr(6), gpr(7));
 999 >          gpr(3) = (int32)(int16)VideoDoDriverIO(gpr(3), gpr(4), gpr(5), gpr(6), gpr(7));
1000 >          break;
1001 >      case NATIVE_ETHER_AO_GET_HWADDR:
1002 >          AO_get_ethernet_address(gpr(3));
1003 >          break;
1004 >      case NATIVE_ETHER_AO_ADD_MULTI:
1005 >          AO_enable_multicast(gpr(3));
1006 >          break;
1007 >      case NATIVE_ETHER_AO_DEL_MULTI:
1008 >          AO_disable_multicast(gpr(3));
1009 >          break;
1010 >      case NATIVE_ETHER_AO_SEND_PACKET:
1011 >          AO_transmit_packet(gpr(3));
1012            break;
1185 -  #ifdef WORDS_BIGENDIAN
1013        case NATIVE_ETHER_IRQ:
1014            EtherIRQ();
1015            break;
1031        case NATIVE_ETHER_RSRV:
1032            gpr(3) = ether_rsrv((queue_t *)gpr(3));
1033            break;
1207 -  #else
1208 -      case NATIVE_ETHER_INIT:
1209 -          // FIXME: needs more complicated thunks
1210 -          gpr(3) = false;
1211 -          break;
1212 -  #endif
1034        case NATIVE_SYNC_HOOK:
1035            gpr(3) = NQD_sync_hook(gpr(3));
1036            break;
1086            break;
1087        }
1088        case NATIVE_MAKE_EXECUTABLE:
1089 <          MakeExecutable(0, (void *)gpr(4), gpr(5));
1089 >          MakeExecutable(0, gpr(4), gpr(5));
1090            break;
1091        case NATIVE_CHECK_LOAD_INVOC:
1092            check_load_invoc(gpr(3), gpr(4), gpr(5));
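The VideoDoDriverIO and MakeExecutable changes follow the same rule as the rest of this revision: the native-op glue passes guest addresses through as plain uint32 values instead of casting register contents to host pointers, leaving any translation to the callee. A hedged sketch of a callee written in that style (DoDriverIO_example and Mac2HostAddr_example are illustrative names; only the translate-at-point-of-use idea is taken from the diff):

#include <cstdint>

// Assumed translation helper in the spirit of Mac2HostAddr (see the earlier sketch).
extern uint8_t *Mac2HostAddr_example(uint32_t mac_addr);

// Native callee that accepts Mac addresses as uint32 and translates only what
// it actually touches, so the dispatch glue never needs (void *) casts.
static int16_t DoDriverIO_example(uint32_t pb_mac, uint32_t dce_mac)
{
    uint8_t *pb  = Mac2HostAddr_example(pb_mac);   // translate at the point of use
    uint8_t *dce = Mac2HostAddr_example(dce_mac);
    (void)pb; (void)dce;
    // ... interpret the parameter block, perform the I/O ...
    return 0;  // noErr
}

On the glue side the call then reduces to forwarding gpr(3), gpr(4), ... unchanged, exactly as the new NATIVE_VIDEO_DO_DRIVER_IO case does.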