1 |
|
/* |
2 |
|
* sheepshaver_glue.cpp - Glue Kheperix CPU to SheepShaver CPU engine interface |
3 |
|
* |
4 |
< |
* SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig |
4 |
> |
* SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig |
5 |
|
* |
6 |
|
* This program is free software; you can redistribute it and/or modify |
7 |
|
* it under the terms of the GNU General Public License as published by |
31 |
|
#include "cpu/ppc/ppc-cpu.hpp" |
32 |
|
#include "cpu/ppc/ppc-operations.hpp" |
33 |
|
#include "cpu/ppc/ppc-instructions.hpp" |
34 |
+ |
#include "thunks.h" |
35 |
|
|
36 |
|
// Used for NativeOp trampolines |
37 |
|
#include "video.h" |
38 |
|
#include "name_registry.h" |
39 |
|
#include "serial.h" |
40 |
|
#include "ether.h" |
41 |
+ |
#include "timer.h" |
42 |
|
|
43 |
|
#include <stdio.h> |
44 |
+ |
#include <stdlib.h> |
45 |
+ |
|
46 |
+ |
#ifdef USE_SDL_VIDEO |
47 |
+ |
#include <SDL_events.h> |
48 |
+ |
#endif |
49 |
|
|
50 |
|
#if ENABLE_MON |
51 |
|
#include "mon.h" |
56 |
|
#include "debug.h" |
57 |
|
|
58 |
|
// Emulation time statistics |
59 |
< |
#define EMUL_TIME_STATS 1 |
59 |
> |
#ifndef EMUL_TIME_STATS |
60 |
> |
#define EMUL_TIME_STATS 0 |
61 |
> |
#endif |
62 |
|
|
63 |
|
#if EMUL_TIME_STATS |
64 |
|
static clock_t emul_start_time; |
65 |
< |
static uint32 interrupt_count = 0; |
65 |
> |
static uint32 interrupt_count = 0, ppc_interrupt_count = 0; |
66 |
|
static clock_t interrupt_time = 0; |
67 |
|
static uint32 exec68k_count = 0; |
68 |
|
static clock_t exec68k_time = 0; |
81 |
|
#endif |
82 |
|
} |
83 |
|
|
84 |
< |
// Enable multicore (main/interrupts) cpu emulation? |
85 |
< |
#define MULTICORE_CPU (ASYNC_IRQ ? 1 : 0) |
84 |
> |
// From main_*.cpp |
85 |
> |
extern uintptr SignalStackBase(); |
86 |
> |
|
87 |
> |
// From rsrc_patches.cpp |
88 |
> |
extern "C" void check_load_invoc(uint32 type, int16 id, uint32 h); |
89 |
> |
|
90 |
> |
// PowerPC EmulOp to exit from emulation loop |
91 |
> |
const uint32 POWERPC_EXEC_RETURN = POWERPC_EMUL_OP | 1; |
92 |
> |
|
93 |
> |
// Enable interrupt routine safety checks? |
94 |
> |
#define SAFE_INTERRUPT_PPC 1 |
95 |
|
|
96 |
|
// Enable Execute68k() safety checks? |
97 |
|
#define SAFE_EXEC_68K 1 |
105 |
|
// Interrupts in native mode? |
106 |
|
#define INTERRUPTS_IN_NATIVE_MODE 1 |
107 |
|
|
108 |
+ |
// Enable native EMUL_OPs to be run without a mode switch |
109 |
+ |
#define ENABLE_NATIVE_EMUL_OP 1 |
110 |
+ |
|
111 |
|
// Pointer to Kernel Data |
112 |
|
static KernelData * const kernel_data = (KernelData *)KERNEL_DATA_BASE; |
113 |
|
|
114 |
|
// SIGSEGV handler |
115 |
< |
static sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t); |
115 |
> |
sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t); |
116 |
> |
|
117 |
> |
#if PPC_ENABLE_JIT && PPC_REENTRANT_JIT |
118 |
> |
// Special trampolines for EmulOp and NativeOp |
119 |
> |
static uint8 *emul_op_trampoline; |
120 |
> |
static uint8 *native_op_trampoline; |
121 |
> |
#endif |
122 |
|
|
123 |
|
// JIT Compiler enabled? |
124 |
|
static inline bool enable_jit_p() |
142 |
|
void init_decoder(); |
143 |
|
void execute_sheep(uint32 opcode); |
144 |
|
|
145 |
+ |
// Filter out EMUL_OP routines that only call native code |
146 |
+ |
bool filter_execute_emul_op(uint32 emul_op); |
147 |
+ |
|
148 |
+ |
// "Native" EMUL_OP routines |
149 |
+ |
void execute_emul_op_microseconds(); |
150 |
+ |
void execute_emul_op_idle_time_1(); |
151 |
+ |
void execute_emul_op_idle_time_2(); |
152 |
+ |
|
153 |
+ |
// CPU context to preserve on interrupt |
154 |
+ |
class interrupt_context { |
155 |
+ |
uint32 gpr[32]; |
156 |
+ |
uint32 pc; |
157 |
+ |
uint32 lr; |
158 |
+ |
uint32 ctr; |
159 |
+ |
uint32 cr; |
160 |
+ |
uint32 xer; |
161 |
+ |
sheepshaver_cpu *cpu; |
162 |
+ |
const char *where; |
163 |
+ |
public: |
164 |
+ |
interrupt_context(sheepshaver_cpu *_cpu, const char *_where); |
165 |
+ |
~interrupt_context(); |
166 |
+ |
}; |
167 |
+ |
|
168 |
|
public: |
169 |
|
|
170 |
|
// Constructor |
171 |
|
sheepshaver_cpu(); |
172 |
|
|
173 |
< |
// Condition Register accessors |
173 |
> |
// CR & XER accessors |
174 |
|
uint32 get_cr() const { return cr().get(); } |
175 |
|
void set_cr(uint32 v) { cr().set(v); } |
176 |
+ |
uint32 get_xer() const { return xer().get(); } |
177 |
+ |
void set_xer(uint32 v) { xer().set(v); } |
178 |
+ |
|
179 |
+ |
// Execute NATIVE_OP routine |
180 |
+ |
void execute_native_op(uint32 native_op); |
181 |
+ |
|
182 |
+ |
// Execute EMUL_OP routine |
183 |
+ |
void execute_emul_op(uint32 emul_op); |
184 |
|
|
185 |
|
// Execute 68k routine |
186 |
|
void execute_68k(uint32 entry, M68kRegisters *r); |
191 |
|
// Execute MacOS/PPC code |
192 |
|
uint32 execute_macos_code(uint32 tvect, int nargs, uint32 const *args); |
193 |
|
|
194 |
+ |
// Compile one instruction |
195 |
+ |
virtual int compile1(codegen_context_t & cg_context); |
196 |
+ |
|
197 |
|
// Resource manager thunk |
198 |
|
void get_resource(uint32 old_get_resource); |
199 |
|
|
201 |
|
void interrupt(uint32 entry); |
202 |
|
void handle_interrupt(); |
203 |
|
|
143 |
– |
// Lazy memory allocator (one item at a time) |
144 |
– |
void *operator new(size_t size) |
145 |
– |
{ return allocator_helper< sheepshaver_cpu, lazy_allocator >::allocate(); } |
146 |
– |
void operator delete(void *p) |
147 |
– |
{ allocator_helper< sheepshaver_cpu, lazy_allocator >::deallocate(p); } |
148 |
– |
// FIXME: really make surre array allocation fail at link time? |
149 |
– |
void *operator new[](size_t); |
150 |
– |
void operator delete[](void *p); |
151 |
– |
|
204 |
|
// Make sure the SIGSEGV handler can access CPU registers |
205 |
|
friend sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t); |
206 |
|
}; |
207 |
|
|
208 |
< |
lazy_allocator< sheepshaver_cpu > allocator_helper< sheepshaver_cpu, lazy_allocator >::allocator; |
208 |
> |
// Memory allocator returning areas aligned on 16-byte boundaries |
209 |
> |
void *operator new(size_t size) |
210 |
> |
{ |
211 |
> |
void *p; |
212 |
> |
|
213 |
> |
#if defined(HAVE_POSIX_MEMALIGN) |
214 |
> |
if (posix_memalign(&p, 16, size) != 0) |
215 |
> |
throw std::bad_alloc(); |
216 |
> |
#elif defined(HAVE_MEMALIGN) |
217 |
> |
p = memalign(16, size); |
218 |
> |
#elif defined(HAVE_VALLOC) |
219 |
> |
p = valloc(size); // page-aligned! |
220 |
> |
#else |
221 |
> |
/* XXX: handle padding ourselves */ |
222 |
> |
p = malloc(size); |
223 |
> |
#endif |
224 |
> |
|
225 |
> |
return p; |
226 |
> |
} |
227 |
> |
|
228 |
> |
void operator delete(void *p) |
229 |
> |
{ |
230 |
> |
#if defined(HAVE_MEMALIGN) || defined(HAVE_VALLOC) |
231 |
> |
#if defined(__GLIBC__) |
232 |
> |
// this is known to work only with GNU libc |
233 |
> |
free(p); |
234 |
> |
#endif |
235 |
> |
#else |
236 |
> |
free(p); |
237 |
> |
#endif |
238 |
> |
} |
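Two things are easy to miss in the hunk above: unlike the class-local allocator these definitions replace, they override the global operator new/delete, so every allocation in the emulator (not just the sheepshaver_cpu object) is affected; and only the posix_memalign/memalign/valloc branches actually guarantee the 16-byte alignment, while the plain malloc fallback still needs the padding handled by hand, as the XXX comment notes.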
239 |
|
|
240 |
|
sheepshaver_cpu::sheepshaver_cpu() |
241 |
|
: powerpc_cpu(enable_jit_p()) |
245 |
|
|
246 |
|
void sheepshaver_cpu::init_decoder() |
247 |
|
{ |
166 |
– |
#ifndef PPC_NO_STATIC_II_INDEX_TABLE |
167 |
– |
static bool initialized = false; |
168 |
– |
if (initialized) |
169 |
– |
return; |
170 |
– |
initialized = true; |
171 |
– |
#endif |
172 |
– |
|
248 |
|
static const instr_info_t sheep_ii_table[] = { |
249 |
|
{ "sheep", |
250 |
|
(execute_pmf)&sheepshaver_cpu::execute_sheep, |
263 |
|
} |
264 |
|
} |
265 |
|
|
191 |
– |
// Forward declaration for native opcode handler |
192 |
– |
static void NativeOp(int selector); |
193 |
– |
|
266 |
|
/* NativeOp instruction format: |
267 |
< |
+------------+--------------------------+--+----------+------------+ |
268 |
< |
| 6 | |FN| OP | 2 | |
269 |
< |
+------------+--------------------------+--+----------+------------+ |
270 |
< |
0 5 |6 19 20 21 25 26 31 |
267 |
> |
+------------+-------------------------+--+-----------+------------+ |
268 |
> |
| 6 | |FN| OP | 2 | |
269 |
> |
+------------+-------------------------+--+-----------+------------+ |
270 |
> |
0 5 |6 18 19 20 25 26 31 |
271 |
|
*/ |
272 |
|
|
273 |
< |
typedef bit_field< 20, 20 > FN_field; |
274 |
< |
typedef bit_field< 21, 25 > NATIVE_OP_field; |
273 |
> |
typedef bit_field< 19, 19 > FN_field; |
274 |
> |
typedef bit_field< 20, 25 > NATIVE_OP_field; |
275 |
|
typedef bit_field< 26, 31 > EMUL_OP_field; |
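For illustration, a minimal decode sketch for the fields above, assuming bit_field<> follows the usual PowerPC/IBM bit numbering (bit 0 is the most significant bit of the 32-bit word), which is what the field positions in the diagram imply; the helper names below are made up for the example and are not part of the patch:

    static inline uint32 extract_field(uint32 op, int first, int last)
    {
    	// PPC bit numbering: bit 0 = MSB, so field [first, last] ends at shift (31 - last)
    	return (op >> (31 - last)) & ((1u << (last - first + 1)) - 1);
    }

    static void decode_sheep_opcode(uint32 opcode)
    {
    	uint32 flavour  = extract_field(opcode, 26, 31); // == opcode & 0x3f: 0 EMUL_RETURN, 1 EXEC_RETURN, 2 EXEC_NATIVE, >= 3 EMUL_OP
    	uint32 selector = extract_field(opcode, 20, 25); // NATIVE_OP_field (EXEC_NATIVE only)
    	bool   fn       = extract_field(opcode, 19, 19) != 0; // FN_field: return via LR when set
    	printf("flavour=%u selector=%u fn=%d\n", flavour, selector, (int)fn);
    }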
276 |
|
|
277 |
+ |
// "Native" EMUL_OP routines |
278 |
+ |
#define GPR_A(REG) gpr(16 + (REG)) |
279 |
+ |
#define GPR_D(REG) gpr( 8 + (REG)) |
280 |
+ |
|
281 |
+ |
void sheepshaver_cpu::execute_emul_op_microseconds() |
282 |
+ |
{ |
283 |
+ |
Microseconds(GPR_A(0), GPR_D(0)); |
284 |
+ |
} |
285 |
+ |
|
286 |
+ |
void sheepshaver_cpu::execute_emul_op_idle_time_1() |
287 |
+ |
{ |
288 |
+ |
// Sleep if no events pending |
289 |
+ |
if (ReadMacInt32(0x14c) == 0) |
290 |
+ |
Delay_usec(16667); |
291 |
+ |
GPR_A(0) = ReadMacInt32(0x2b6); |
292 |
+ |
} |
293 |
+ |
|
294 |
+ |
void sheepshaver_cpu::execute_emul_op_idle_time_2() |
295 |
+ |
{ |
296 |
+ |
// Sleep if no events pending |
297 |
+ |
if (ReadMacInt32(0x14c) == 0) |
298 |
+ |
Delay_usec(16667); |
299 |
+ |
GPR_D(0) = (uint32)-2; |
300 |
+ |
} |
301 |
+ |
|
302 |
+ |
// Filter out EMUL_OP routines that only call native code |
303 |
+ |
bool sheepshaver_cpu::filter_execute_emul_op(uint32 emul_op) |
304 |
+ |
{ |
305 |
+ |
switch (emul_op) { |
306 |
+ |
case OP_MICROSECONDS: |
307 |
+ |
execute_emul_op_microseconds(); |
308 |
+ |
return true; |
309 |
+ |
case OP_IDLE_TIME: |
310 |
+ |
execute_emul_op_idle_time_1(); |
311 |
+ |
return true; |
312 |
+ |
case OP_IDLE_TIME_2: |
313 |
+ |
execute_emul_op_idle_time_2(); |
314 |
+ |
return true; |
315 |
+ |
} |
316 |
+ |
return false; |
317 |
+ |
} |
318 |
+ |
|
319 |
+ |
// Execute EMUL_OP routine |
320 |
+ |
void sheepshaver_cpu::execute_emul_op(uint32 emul_op) |
321 |
+ |
{ |
322 |
+ |
#if ENABLE_NATIVE_EMUL_OP |
323 |
+ |
// First, filter out EMUL_OPs that can be executed without a mode switch |
324 |
+ |
if (filter_execute_emul_op(emul_op)) |
325 |
+ |
return; |
326 |
+ |
#endif |
327 |
+ |
|
328 |
+ |
M68kRegisters r68; |
329 |
+ |
WriteMacInt32(XLM_68K_R25, gpr(25)); |
330 |
+ |
WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP); |
331 |
+ |
for (int i = 0; i < 8; i++) |
332 |
+ |
r68.d[i] = gpr(8 + i); |
333 |
+ |
for (int i = 0; i < 7; i++) |
334 |
+ |
r68.a[i] = gpr(16 + i); |
335 |
+ |
r68.a[7] = gpr(1); |
336 |
+ |
uint32 saved_cr = get_cr() & CR_field<2>::mask(); |
337 |
+ |
uint32 saved_xer = get_xer(); |
338 |
+ |
EmulOp(&r68, gpr(24), emul_op); |
339 |
+ |
set_cr(saved_cr); |
340 |
+ |
set_xer(saved_xer); |
341 |
+ |
for (int i = 0; i < 8; i++) |
342 |
+ |
gpr(8 + i) = r68.d[i]; |
343 |
+ |
for (int i = 0; i < 7; i++) |
344 |
+ |
gpr(16 + i) = r68.a[i]; |
345 |
+ |
gpr(1) = r68.a[7]; |
346 |
+ |
WriteMacInt32(XLM_RUN_MODE, MODE_68K); |
347 |
+ |
} |
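For reference, the copy loops above follow the 68k-to-PowerPC register convention that the GPR_A/GPR_D macros assume:

    // D0..D7 <-> r8..r15    (GPR_D(n) == gpr(8 + n))
    // A0..A6 <-> r16..r22   (GPR_A(n) == gpr(16 + n))
    // A7     <-> r1         (the emulated stack pointer)

This mapping is what lets the "native" EMUL_OPs above, e.g. execute_emul_op_microseconds(), hand A0/D0 straight to the host routine without a 68k mode switch.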
348 |
+ |
|
349 |
|
// Execute SheepShaver instruction |
350 |
|
void sheepshaver_cpu::execute_sheep(uint32 opcode) |
351 |
|
{ |
362 |
|
break; |
363 |
|
|
364 |
|
case 2: // EXEC_NATIVE |
365 |
< |
NativeOp(NATIVE_OP_field::extract(opcode)); |
365 |
> |
execute_native_op(NATIVE_OP_field::extract(opcode)); |
366 |
|
if (FN_field::test(opcode)) |
367 |
|
pc() = lr(); |
368 |
|
else |
369 |
|
pc() += 4; |
370 |
|
break; |
371 |
|
|
372 |
< |
default: { // EMUL_OP |
373 |
< |
M68kRegisters r68; |
230 |
< |
WriteMacInt32(XLM_68K_R25, gpr(25)); |
231 |
< |
WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP); |
232 |
< |
for (int i = 0; i < 8; i++) |
233 |
< |
r68.d[i] = gpr(8 + i); |
234 |
< |
for (int i = 0; i < 7; i++) |
235 |
< |
r68.a[i] = gpr(16 + i); |
236 |
< |
r68.a[7] = gpr(1); |
237 |
< |
EmulOp(&r68, gpr(24), EMUL_OP_field::extract(opcode) - 3); |
238 |
< |
for (int i = 0; i < 8; i++) |
239 |
< |
gpr(8 + i) = r68.d[i]; |
240 |
< |
for (int i = 0; i < 7; i++) |
241 |
< |
gpr(16 + i) = r68.a[i]; |
242 |
< |
gpr(1) = r68.a[7]; |
243 |
< |
WriteMacInt32(XLM_RUN_MODE, MODE_68K); |
372 |
> |
default: // EMUL_OP |
373 |
> |
execute_emul_op(EMUL_OP_field::extract(opcode) - 3); |
374 |
|
pc() += 4; |
375 |
|
break; |
376 |
|
} |
377 |
+ |
} |
378 |
+ |
|
379 |
+ |
// Compile one instruction |
380 |
+ |
int sheepshaver_cpu::compile1(codegen_context_t & cg_context) |
381 |
+ |
{ |
382 |
+ |
#if PPC_ENABLE_JIT |
383 |
+ |
const instr_info_t *ii = cg_context.instr_info; |
384 |
+ |
if (ii->mnemo != PPC_I(SHEEP)) |
385 |
+ |
return COMPILE_FAILURE; |
386 |
+ |
|
387 |
+ |
int status = COMPILE_FAILURE; |
388 |
+ |
powerpc_dyngen & dg = cg_context.codegen; |
389 |
+ |
uint32 opcode = cg_context.opcode; |
390 |
+ |
|
391 |
+ |
switch (opcode & 0x3f) { |
392 |
+ |
case 0: // EMUL_RETURN |
393 |
+ |
dg.gen_invoke(QuitEmulator); |
394 |
+ |
status = COMPILE_CODE_OK; |
395 |
+ |
break; |
396 |
+ |
|
397 |
+ |
case 1: // EXEC_RETURN |
398 |
+ |
dg.gen_spcflags_set(SPCFLAG_CPU_EXEC_RETURN); |
399 |
+ |
// Don't check for pending interrupts; we know we have to |
400 |
+ |
// get out of this block ASAP |
401 |
+ |
dg.gen_exec_return(); |
402 |
+ |
status = COMPILE_EPILOGUE_OK; |
403 |
+ |
break; |
404 |
+ |
|
405 |
+ |
case 2: { // EXEC_NATIVE |
406 |
+ |
uint32 selector = NATIVE_OP_field::extract(opcode); |
407 |
+ |
switch (selector) { |
408 |
+ |
#if !PPC_REENTRANT_JIT |
409 |
+ |
// Filter out functions that may invoke Execute68k() or |
410 |
+ |
// CallMacOS(); those would break reentrancy as they could |
411 |
+ |
// invalidate the translation cache and even overwrite |
412 |
+ |
// continuation code when we are done with them. |
413 |
+ |
case NATIVE_PATCH_NAME_REGISTRY: |
414 |
+ |
dg.gen_invoke(DoPatchNameRegistry); |
415 |
+ |
status = COMPILE_CODE_OK; |
416 |
+ |
break; |
417 |
+ |
case NATIVE_VIDEO_INSTALL_ACCEL: |
418 |
+ |
dg.gen_invoke(VideoInstallAccel); |
419 |
+ |
status = COMPILE_CODE_OK; |
420 |
+ |
break; |
421 |
+ |
case NATIVE_VIDEO_VBL: |
422 |
+ |
dg.gen_invoke(VideoVBL); |
423 |
+ |
status = COMPILE_CODE_OK; |
424 |
+ |
break; |
425 |
+ |
case NATIVE_GET_RESOURCE: |
426 |
+ |
case NATIVE_GET_1_RESOURCE: |
427 |
+ |
case NATIVE_GET_IND_RESOURCE: |
428 |
+ |
case NATIVE_GET_1_IND_RESOURCE: |
429 |
+ |
case NATIVE_R_GET_RESOURCE: { |
430 |
+ |
static const uint32 get_resource_ptr[] = { |
431 |
+ |
XLM_GET_RESOURCE, |
432 |
+ |
XLM_GET_1_RESOURCE, |
433 |
+ |
XLM_GET_IND_RESOURCE, |
434 |
+ |
XLM_GET_1_IND_RESOURCE, |
435 |
+ |
XLM_R_GET_RESOURCE |
436 |
+ |
}; |
437 |
+ |
uint32 old_get_resource = ReadMacInt32(get_resource_ptr[selector - NATIVE_GET_RESOURCE]); |
438 |
+ |
typedef void (*func_t)(dyngen_cpu_base, uint32); |
439 |
+ |
func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::get_resource).ptr(); |
440 |
+ |
dg.gen_invoke_CPU_im(func, old_get_resource); |
441 |
+ |
status = COMPILE_CODE_OK; |
442 |
+ |
break; |
443 |
+ |
} |
444 |
+ |
case NATIVE_CHECK_LOAD_INVOC: |
445 |
+ |
dg.gen_load_T0_GPR(3); |
446 |
+ |
dg.gen_load_T1_GPR(4); |
447 |
+ |
dg.gen_se_16_32_T1(); |
448 |
+ |
dg.gen_load_T2_GPR(5); |
449 |
+ |
dg.gen_invoke_T0_T1_T2((void (*)(uint32, uint32, uint32))check_load_invoc); |
450 |
+ |
status = COMPILE_CODE_OK; |
451 |
+ |
break; |
452 |
+ |
#endif |
453 |
+ |
case NATIVE_BITBLT: |
454 |
+ |
dg.gen_load_T0_GPR(3); |
455 |
+ |
dg.gen_invoke_T0((void (*)(uint32))NQD_bitblt); |
456 |
+ |
status = COMPILE_CODE_OK; |
457 |
+ |
break; |
458 |
+ |
case NATIVE_INVRECT: |
459 |
+ |
dg.gen_load_T0_GPR(3); |
460 |
+ |
dg.gen_invoke_T0((void (*)(uint32))NQD_invrect); |
461 |
+ |
status = COMPILE_CODE_OK; |
462 |
+ |
break; |
463 |
+ |
case NATIVE_FILLRECT: |
464 |
+ |
dg.gen_load_T0_GPR(3); |
465 |
+ |
dg.gen_invoke_T0((void (*)(uint32))NQD_fillrect); |
466 |
+ |
status = COMPILE_CODE_OK; |
467 |
+ |
break; |
468 |
+ |
} |
469 |
+ |
// Could we fully translate this NativeOp? |
470 |
+ |
if (status == COMPILE_CODE_OK) { |
471 |
+ |
if (!FN_field::test(opcode)) |
472 |
+ |
cg_context.done_compile = false; |
473 |
+ |
else { |
474 |
+ |
dg.gen_load_A0_LR(); |
475 |
+ |
dg.gen_set_PC_A0(); |
476 |
+ |
cg_context.done_compile = true; |
477 |
+ |
} |
478 |
+ |
break; |
479 |
+ |
} |
480 |
+ |
#if PPC_REENTRANT_JIT |
481 |
+ |
// Try to execute NativeOp trampoline |
482 |
+ |
if (!FN_field::test(opcode)) |
483 |
+ |
dg.gen_set_PC_im(cg_context.pc + 4); |
484 |
+ |
else { |
485 |
+ |
dg.gen_load_A0_LR(); |
486 |
+ |
dg.gen_set_PC_A0(); |
487 |
+ |
} |
488 |
+ |
dg.gen_mov_32_T0_im(selector); |
489 |
+ |
dg.gen_jmp(native_op_trampoline); |
490 |
+ |
cg_context.done_compile = true; |
491 |
+ |
status = COMPILE_EPILOGUE_OK; |
492 |
+ |
break; |
493 |
+ |
#endif |
494 |
+ |
// Invoke NativeOp handler |
495 |
+ |
if (!FN_field::test(opcode)) { |
496 |
+ |
typedef void (*func_t)(dyngen_cpu_base, uint32); |
497 |
+ |
func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_native_op).ptr(); |
498 |
+ |
dg.gen_invoke_CPU_im(func, selector); |
499 |
+ |
cg_context.done_compile = false; |
500 |
+ |
status = COMPILE_CODE_OK; |
501 |
+ |
} |
502 |
+ |
// Otherwise, let it generate a call to execute_sheep() which |
503 |
+ |
// will perform the necessary updates to the program counter |
504 |
+ |
break; |
505 |
+ |
} |
506 |
+ |
|
507 |
+ |
default: { // EMUL_OP |
508 |
+ |
uint32 emul_op = EMUL_OP_field::extract(opcode) - 3; |
509 |
+ |
#if ENABLE_NATIVE_EMUL_OP |
510 |
+ |
typedef void (*emul_op_func_t)(dyngen_cpu_base); |
511 |
+ |
emul_op_func_t emul_op_func = 0; |
512 |
+ |
switch (emul_op) { |
513 |
+ |
case OP_MICROSECONDS: |
514 |
+ |
emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_microseconds).ptr(); |
515 |
+ |
break; |
516 |
+ |
case OP_IDLE_TIME: |
517 |
+ |
emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_1).ptr(); |
518 |
+ |
break; |
519 |
+ |
case OP_IDLE_TIME_2: |
520 |
+ |
emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_2).ptr(); |
521 |
+ |
break; |
522 |
+ |
} |
523 |
+ |
if (emul_op_func) { |
524 |
+ |
dg.gen_invoke_CPU(emul_op_func); |
525 |
+ |
cg_context.done_compile = false; |
526 |
+ |
status = COMPILE_CODE_OK; |
527 |
+ |
break; |
528 |
+ |
} |
529 |
+ |
#endif |
530 |
+ |
#if PPC_REENTRANT_JIT |
531 |
+ |
// Try to execute EmulOp trampoline |
532 |
+ |
dg.gen_set_PC_im(cg_context.pc + 4); |
533 |
+ |
dg.gen_mov_32_T0_im(emul_op); |
534 |
+ |
dg.gen_jmp(emul_op_trampoline); |
535 |
+ |
cg_context.done_compile = true; |
536 |
+ |
status = COMPILE_EPILOGUE_OK; |
537 |
+ |
break; |
538 |
+ |
#endif |
539 |
+ |
// Invoke EmulOp handler |
540 |
+ |
typedef void (*func_t)(dyngen_cpu_base, uint32); |
541 |
+ |
func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op).ptr(); |
542 |
+ |
dg.gen_invoke_CPU_im(func, emul_op); |
543 |
+ |
cg_context.done_compile = false; |
544 |
+ |
status = COMPILE_CODE_OK; |
545 |
+ |
break; |
546 |
+ |
} |
547 |
|
} |
548 |
+ |
return status; |
549 |
+ |
#endif |
550 |
+ |
return COMPILE_FAILURE; |
551 |
+ |
} |
552 |
+ |
|
553 |
+ |
// CPU context to preserve on interrupt |
554 |
+ |
sheepshaver_cpu::interrupt_context::interrupt_context(sheepshaver_cpu *_cpu, const char *_where) |
555 |
+ |
{ |
556 |
+ |
#if SAFE_INTERRUPT_PPC >= 2 |
557 |
+ |
cpu = _cpu; |
558 |
+ |
where = _where; |
559 |
+ |
|
560 |
+ |
// Save interrupt context |
561 |
+ |
memcpy(&gpr[0], &cpu->gpr(0), sizeof(gpr)); |
562 |
+ |
pc = cpu->pc(); |
563 |
+ |
lr = cpu->lr(); |
564 |
+ |
ctr = cpu->ctr(); |
565 |
+ |
cr = cpu->get_cr(); |
566 |
+ |
xer = cpu->get_xer(); |
567 |
+ |
#endif |
568 |
+ |
} |
569 |
+ |
|
570 |
+ |
sheepshaver_cpu::interrupt_context::~interrupt_context() |
571 |
+ |
{ |
572 |
+ |
#if SAFE_INTERRUPT_PPC >= 2 |
573 |
+ |
// Check whether CPU context was preserved by interrupt |
574 |
+ |
if (memcmp(&gpr[0], &cpu->gpr(0), sizeof(gpr)) != 0) { |
575 |
+ |
printf("FATAL: %s: interrupt clobbers registers\n", where); |
576 |
+ |
for (int i = 0; i < 32; i++) |
577 |
+ |
if (gpr[i] != cpu->gpr(i)) |
578 |
+ |
printf(" r%d: %08x -> %08x\n", i, gpr[i], cpu->gpr(i)); |
579 |
+ |
} |
580 |
+ |
if (pc != cpu->pc()) |
581 |
+ |
printf("FATAL: %s: interrupt clobbers PC\n", where); |
582 |
+ |
if (lr != cpu->lr()) |
583 |
+ |
printf("FATAL: %s: interrupt clobbers LR\n", where); |
584 |
+ |
if (ctr != cpu->ctr()) |
585 |
+ |
printf("FATAL: %s: interrupt clobbers CTR\n", where); |
586 |
+ |
if (cr != cpu->get_cr()) |
587 |
+ |
printf("FATAL: %s: interrupt clobbers CR\n", where); |
588 |
+ |
if (xer != cpu->get_xer()) |
589 |
+ |
printf("FATAL: %s: interrupt clobbers XER\n", where); |
590 |
+ |
#endif |
591 |
|
} |
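The class is meant to be used as a scoped guard around anything that re-enters the CPU from the interrupt path; the checks are only compiled in when SAFE_INTERRUPT_PPC >= 2. A minimal sketch of the pattern, matching the ctx objects added to handle_interrupt() further down in this patch:

    {
    	interrupt_context ctx(this, "PowerPC mode"); // snapshots GPRs, PC, LR, CTR, CR, XER
    	// ... run the nanokernel / 68k interrupt routine ...
    }   // ~interrupt_context() prints a FATAL line for each clobbered register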
592 |
|
|
593 |
|
// Handle MacOS interrupt |
594 |
|
void sheepshaver_cpu::interrupt(uint32 entry) |
595 |
|
{ |
596 |
|
#if EMUL_TIME_STATS |
597 |
< |
interrupt_count++; |
597 |
> |
ppc_interrupt_count++; |
598 |
|
const clock_t interrupt_start = clock(); |
599 |
|
#endif |
600 |
|
|
601 |
< |
#if !MULTICORE_CPU |
601 |
> |
#if SAFE_INTERRUPT_PPC |
602 |
> |
static int depth = 0; |
603 |
> |
if (depth != 0) |
604 |
> |
printf("FATAL: sheepshaver_cpu::interrupt() called more than once: %d\n", depth); |
605 |
> |
depth++; |
606 |
> |
#endif |
607 |
> |
|
608 |
|
// Save program counters and branch registers |
609 |
|
uint32 saved_pc = pc(); |
610 |
|
uint32 saved_lr = lr(); |
611 |
|
uint32 saved_ctr= ctr(); |
612 |
|
uint32 saved_sp = gpr(1); |
264 |
– |
#endif |
613 |
|
|
614 |
|
// Initialize stack pointer to SheepShaver alternate stack base |
615 |
< |
gpr(1) = SheepStack1Base - 64; |
615 |
> |
gpr(1) = SignalStackBase() - 64; |
616 |
|
|
617 |
|
// Build trampoline to return from interrupt |
618 |
< |
uint32 trampoline[] = { htonl(POWERPC_EMUL_OP | 1) }; |
618 |
> |
SheepVar32 trampoline = POWERPC_EXEC_RETURN; |
619 |
|
|
620 |
|
// Prepare registers for nanokernel interrupt routine |
621 |
|
kernel_data->v[0x004 >> 2] = htonl(gpr(1)); |
634 |
|
gpr(1) = KernelDataAddr; |
635 |
|
gpr(7) = ntohl(kernel_data->v[0x660 >> 2]); |
636 |
|
gpr(8) = 0; |
637 |
< |
gpr(10) = (uint32)trampoline; |
638 |
< |
gpr(12) = (uint32)trampoline; |
637 |
> |
gpr(10) = trampoline.addr(); |
638 |
> |
gpr(12) = trampoline.addr(); |
639 |
|
gpr(13) = get_cr(); |
640 |
|
|
641 |
|
// rlwimi. r7,r7,8,0,0 |
649 |
|
// Enter nanokernel |
650 |
|
execute(entry); |
651 |
|
|
304 |
– |
#if !MULTICORE_CPU |
652 |
|
// Restore program counters and branch registers |
653 |
|
pc() = saved_pc; |
654 |
|
lr() = saved_lr; |
655 |
|
ctr()= saved_ctr; |
656 |
|
gpr(1) = saved_sp; |
310 |
– |
#endif |
657 |
|
|
658 |
|
#if EMUL_TIME_STATS |
659 |
|
interrupt_time += (clock() - interrupt_start); |
660 |
|
#endif |
661 |
+ |
|
662 |
+ |
#if SAFE_INTERRUPT_PPC |
663 |
+ |
depth--; |
664 |
+ |
#endif |
665 |
|
} |
666 |
|
|
667 |
|
// Execute 68k routine |
774 |
|
uint32 saved_ctr= ctr(); |
775 |
|
|
776 |
|
// Build trampoline with EXEC_RETURN |
777 |
< |
uint32 trampoline[] = { htonl(POWERPC_EMUL_OP | 1) }; |
778 |
< |
lr() = (uint32)trampoline; |
777 |
> |
SheepVar32 trampoline = POWERPC_EXEC_RETURN; |
778 |
> |
lr() = trampoline.addr(); |
779 |
|
|
780 |
|
gpr(1) -= 64; // Create stack frame |
781 |
|
uint32 proc = ReadMacInt32(tvect); // Get routine address |
819 |
|
// Save branch registers |
820 |
|
uint32 saved_lr = lr(); |
821 |
|
|
822 |
< |
const uint32 trampoline[] = { htonl(POWERPC_EMUL_OP | 1) }; |
823 |
< |
lr() = (uint32)trampoline; |
822 |
> |
SheepVar32 trampoline = POWERPC_EXEC_RETURN; |
823 |
> |
WriteMacInt32(trampoline.addr(), POWERPC_EXEC_RETURN); |
824 |
> |
lr() = trampoline.addr(); |
825 |
|
|
826 |
|
execute(entry); |
827 |
|
|
830 |
|
} |
831 |
|
|
832 |
|
// Resource Manager thunk |
482 |
– |
extern "C" void check_load_invoc(uint32 type, int16 id, uint32 h); |
483 |
– |
|
833 |
|
inline void sheepshaver_cpu::get_resource(uint32 old_get_resource) |
834 |
|
{ |
835 |
|
uint32 type = gpr(3); |
855 |
|
* SheepShaver CPU engine interface |
856 |
|
**/ |
857 |
|
|
858 |
< |
static sheepshaver_cpu *main_cpu = NULL; // CPU emulator to handle usual control flow |
859 |
< |
static sheepshaver_cpu *interrupt_cpu = NULL; // CPU emulator to handle interrupts |
511 |
< |
static sheepshaver_cpu *current_cpu = NULL; // Current CPU emulator context |
858 |
> |
// PowerPC CPU emulator |
859 |
> |
static sheepshaver_cpu *ppc_cpu = NULL; |
860 |
|
|
861 |
|
void FlushCodeCache(uintptr start, uintptr end) |
862 |
|
{ |
863 |
|
D(bug("FlushCodeCache(%08x, %08x)\n", start, end)); |
864 |
< |
main_cpu->invalidate_cache_range(start, end); |
517 |
< |
#if MULTICORE_CPU |
518 |
< |
interrupt_cpu->invalidate_cache_range(start, end); |
519 |
< |
#endif |
520 |
< |
} |
521 |
< |
|
522 |
< |
static inline void cpu_push(sheepshaver_cpu *new_cpu) |
523 |
< |
{ |
524 |
< |
#if MULTICORE_CPU |
525 |
< |
current_cpu = new_cpu; |
526 |
< |
#endif |
527 |
< |
} |
528 |
< |
|
529 |
< |
static inline void cpu_pop() |
530 |
< |
{ |
531 |
< |
#if MULTICORE_CPU |
532 |
< |
current_cpu = main_cpu; |
533 |
< |
#endif |
864 |
> |
ppc_cpu->invalidate_cache_range(start, end); |
865 |
|
} |
866 |
|
|
867 |
|
// Dump PPC registers |
868 |
|
static void dump_registers(void) |
869 |
|
{ |
870 |
< |
current_cpu->dump_registers(); |
870 |
> |
ppc_cpu->dump_registers(); |
871 |
|
} |
872 |
|
|
873 |
|
// Dump log |
874 |
|
static void dump_log(void) |
875 |
|
{ |
876 |
< |
current_cpu->dump_log(); |
876 |
> |
ppc_cpu->dump_log(); |
877 |
|
} |
878 |
|
|
879 |
|
/* |
880 |
|
* Initialize CPU emulation |
881 |
|
*/ |
882 |
|
|
883 |
< |
static sigsegv_return_t sigsegv_handler(sigsegv_address_t fault_address, sigsegv_address_t fault_instruction) |
883 |
> |
sigsegv_return_t sigsegv_handler(sigsegv_address_t fault_address, sigsegv_address_t fault_instruction) |
884 |
|
{ |
885 |
|
#if ENABLE_VOSF |
886 |
|
// Handle screen fault |
896 |
|
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
897 |
|
|
898 |
|
// Get program counter of target CPU |
899 |
< |
sheepshaver_cpu * const cpu = current_cpu; |
899 |
> |
sheepshaver_cpu * const cpu = ppc_cpu; |
900 |
|
const uint32 pc = cpu->pc(); |
901 |
|
|
902 |
|
// Fault in Mac ROM or RAM? |
903 |
< |
bool mac_fault = (pc >= ROM_BASE) && (pc < (ROM_BASE + ROM_AREA_SIZE)) || (pc >= RAMBase) && (pc < (RAMBase + RAMSize)); |
903 |
> |
bool mac_fault = (pc >= ROM_BASE) && (pc < (ROM_BASE + ROM_AREA_SIZE)) || (pc >= RAMBase) && (pc < (RAMBase + RAMSize)) || (pc >= DR_CACHE_BASE && pc < (DR_CACHE_BASE + DR_CACHE_SIZE)); |
904 |
|
if (mac_fault) { |
905 |
|
|
906 |
|
// "VM settings" during MacOS 8 installation |
920 |
|
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
921 |
|
else if (pc == ROM_BASE + 0x4a10a0 && (cpu->gpr(20) == 0xf3012002 || cpu->gpr(20) == 0xf3012000)) |
922 |
|
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
923 |
+ |
|
924 |
+ |
// MacOS 8.6 serial drivers on startup (with DR Cache and OldWorld ROM) |
925 |
+ |
else if ((pc - DR_CACHE_BASE) < DR_CACHE_SIZE && (cpu->gpr(16) == 0xf3012002 || cpu->gpr(16) == 0xf3012000)) |
926 |
+ |
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
927 |
+ |
else if ((pc - DR_CACHE_BASE) < DR_CACHE_SIZE && (cpu->gpr(20) == 0xf3012002 || cpu->gpr(20) == 0xf3012000)) |
928 |
+ |
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
929 |
+ |
|
930 |
+ |
// Ignore writes to the zero page |
931 |
+ |
else if ((uint32)(addr - SheepMem::ZeroPage()) < (uint32)SheepMem::PageSize()) |
932 |
+ |
return SIGSEGV_RETURN_SKIP_INSTRUCTION; |
933 |
|
|
934 |
|
// Ignore all other faults, if requested |
935 |
|
if (PrefsFindBool("ignoresegv")) |
942 |
|
printf("SIGSEGV\n"); |
943 |
|
printf(" pc %p\n", fault_instruction); |
944 |
|
printf(" ea %p\n", fault_address); |
604 |
– |
printf(" cpu %s\n", current_cpu == main_cpu ? "main" : "interrupts"); |
945 |
|
dump_registers(); |
946 |
< |
current_cpu->dump_log(); |
946 |
> |
ppc_cpu->dump_log(); |
947 |
|
enter_mon(); |
948 |
|
QuitEmulator(); |
949 |
|
|
953 |
|
void init_emul_ppc(void) |
954 |
|
{ |
955 |
|
// Initialize main CPU emulator |
956 |
< |
main_cpu = new sheepshaver_cpu(); |
957 |
< |
main_cpu->set_register(powerpc_registers::GPR(3), any_register((uint32)ROM_BASE + 0x30d000)); |
956 |
> |
ppc_cpu = new sheepshaver_cpu(); |
957 |
> |
ppc_cpu->set_register(powerpc_registers::GPR(3), any_register((uint32)ROM_BASE + 0x30d000)); |
958 |
> |
ppc_cpu->set_register(powerpc_registers::GPR(4), any_register(KernelDataAddr + 0x1000)); |
959 |
|
WriteMacInt32(XLM_RUN_MODE, MODE_68K); |
960 |
|
|
620 |
– |
#if MULTICORE_CPU |
621 |
– |
// Initialize alternate CPU emulator to handle interrupts |
622 |
– |
interrupt_cpu = new sheepshaver_cpu(); |
623 |
– |
#endif |
624 |
– |
|
625 |
– |
// Install the handler for SIGSEGV |
626 |
– |
sigsegv_install_handler(sigsegv_handler); |
627 |
– |
|
961 |
|
#if ENABLE_MON |
962 |
|
// Install "regs" command in cxmon |
963 |
|
mon_add_command("regs", dump_registers, "regs Dump PowerPC registers\n"); |
983 |
|
printf("Total emulation time : %.1f sec\n", double(emul_time) / double(CLOCKS_PER_SEC)); |
984 |
|
printf("Total interrupt count: %d (%2.1f Hz)\n", interrupt_count, |
985 |
|
(double(interrupt_count) * CLOCKS_PER_SEC) / double(emul_time)); |
986 |
+ |
printf("Total ppc interrupt count: %d (%2.1f %%)\n", ppc_interrupt_count, |
987 |
+ |
(double(ppc_interrupt_count) * 100.0) / double(interrupt_count)); |
988 |
|
|
989 |
|
#define PRINT_STATS(LABEL, VAR_PREFIX) do { \ |
990 |
|
printf("Total " LABEL " count : %d\n", VAR_PREFIX##_count); \ |
1001 |
|
printf("\n"); |
1002 |
|
#endif |
1003 |
|
|
1004 |
< |
delete main_cpu; |
1005 |
< |
#if MULTICORE_CPU |
1006 |
< |
delete interrupt_cpu; |
1007 |
< |
#endif |
1004 |
> |
delete ppc_cpu; |
1005 |
> |
} |
1006 |
> |
|
1007 |
> |
#if PPC_ENABLE_JIT && PPC_REENTRANT_JIT |
1008 |
> |
// Initialize EmulOp trampolines |
1009 |
> |
void init_emul_op_trampolines(basic_dyngen & dg) |
1010 |
> |
{ |
1011 |
> |
typedef void (*func_t)(dyngen_cpu_base, uint32); |
1012 |
> |
func_t func; |
1013 |
> |
|
1014 |
> |
// EmulOp |
1015 |
> |
emul_op_trampoline = dg.gen_start(); |
1016 |
> |
func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op).ptr(); |
1017 |
> |
dg.gen_invoke_CPU_T0(func); |
1018 |
> |
dg.gen_exec_return(); |
1019 |
> |
dg.gen_end(); |
1020 |
> |
|
1021 |
> |
// NativeOp |
1022 |
> |
native_op_trampoline = dg.gen_start(); |
1023 |
> |
func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_native_op).ptr(); |
1024 |
> |
dg.gen_invoke_CPU_T0(func); |
1025 |
> |
dg.gen_exec_return(); |
1026 |
> |
dg.gen_end(); |
1027 |
> |
|
1028 |
> |
D(bug("EmulOp trampoline: %p\n", emul_op_trampoline)); |
1029 |
> |
D(bug("NativeOp trampoline: %p\n", native_op_trampoline)); |
1030 |
|
} |
1031 |
+ |
#endif |
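Read together with compile1() above: under a re-entrant JIT, a translated EmulOp or NativeOp becomes "set PC, load the selector into T0, jump to the trampoline", and the trampoline generated here simply forwards T0 to execute_emul_op()/execute_native_op() and drops back into the execution loop. A rough picture of the generated control flow:

    // translated block:  gen_set_PC_im(...)  ->  gen_mov_32_T0_im(selector)  ->  gen_jmp(trampoline)
    // trampoline:        gen_invoke_CPU_T0(handler)  ->  gen_exec_return()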
1032 |
|
|
1033 |
|
/* |
1034 |
|
* Emulation loop |
1036 |
|
|
1037 |
|
void emul_ppc(uint32 entry) |
1038 |
|
{ |
1039 |
< |
current_cpu = main_cpu; |
1040 |
< |
#if DEBUG |
683 |
< |
current_cpu->start_log(); |
1039 |
> |
#if 0 |
1040 |
> |
ppc_cpu->start_log(); |
1041 |
|
#endif |
1042 |
|
// start emulation loop and enable code translation or caching |
1043 |
< |
current_cpu->execute(entry); |
1043 |
> |
ppc_cpu->execute(entry); |
1044 |
|
} |
1045 |
|
|
1046 |
|
/* |
1047 |
|
* Handle PowerPC interrupt |
1048 |
|
*/ |
1049 |
|
|
693 |
– |
#if ASYNC_IRQ |
694 |
– |
void HandleInterrupt(void) |
695 |
– |
{ |
696 |
– |
main_cpu->handle_interrupt(); |
697 |
– |
} |
698 |
– |
#else |
1050 |
|
void TriggerInterrupt(void) |
1051 |
|
{ |
1052 |
|
#if 0 |
1053 |
|
WriteMacInt32(0x16a, ReadMacInt32(0x16a) + 1); |
1054 |
|
#else |
1055 |
|
// Trigger interrupt to main cpu only |
1056 |
< |
if (main_cpu) |
1057 |
< |
main_cpu->trigger_interrupt(); |
1056 |
> |
if (ppc_cpu) |
1057 |
> |
ppc_cpu->trigger_interrupt(); |
1058 |
|
#endif |
1059 |
|
} |
709 |
– |
#endif |
1060 |
|
|
1061 |
|
void sheepshaver_cpu::handle_interrupt(void) |
1062 |
|
{ |
1063 |
+ |
#ifdef USE_SDL_VIDEO |
1064 |
+ |
// We must fill the event queue in the same thread that called SDL_SetVideoMode() |
1065 |
+ |
SDL_PumpEvents(); |
1066 |
+ |
#endif |
1067 |
+ |
|
1068 |
|
// Do nothing if interrupts are disabled |
1069 |
< |
if (*(int32 *)XLM_IRQ_NEST > 0) |
1069 |
> |
if (int32(ReadMacInt32(XLM_IRQ_NEST)) > 0) |
1070 |
|
return; |
1071 |
|
|
1072 |
< |
// Do nothing if there is no interrupt pending |
1073 |
< |
if (InterruptFlags == 0) |
1074 |
< |
return; |
1072 |
> |
// Current interrupt nest level |
1073 |
> |
static int interrupt_depth = 0; |
1074 |
> |
++interrupt_depth; |
1075 |
> |
#if EMUL_TIME_STATS |
1076 |
> |
interrupt_count++; |
1077 |
> |
#endif |
1078 |
|
|
1079 |
|
// Disable MacOS stack sniffer |
1080 |
|
WriteMacInt32(0x110, 0); |
1083 |
|
switch (ReadMacInt32(XLM_RUN_MODE)) { |
1084 |
|
case MODE_68K: |
1085 |
|
// 68k emulator active, trigger 68k interrupt level 1 |
728 |
– |
assert(current_cpu == main_cpu); |
1086 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
1087 |
|
set_cr(get_cr() | tswap32(kernel_data->v[0x674 >> 2])); |
1088 |
|
break; |
1090 |
|
#if INTERRUPTS_IN_NATIVE_MODE |
1091 |
|
case MODE_NATIVE: |
1092 |
|
// 68k emulator inactive, in nanokernel? |
1093 |
< |
assert(current_cpu == main_cpu); |
1094 |
< |
if (gpr(1) != KernelDataAddr) { |
1093 |
> |
if (gpr(1) != KernelDataAddr && interrupt_depth == 1) { |
1094 |
> |
interrupt_context ctx(this, "PowerPC mode"); |
1095 |
> |
|
1096 |
|
// Prepare for 68k interrupt level 1 |
1097 |
|
WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1); |
1098 |
|
WriteMacInt32(tswap32(kernel_data->v[0x658 >> 2]) + 0xdc, |
1101 |
|
|
1102 |
|
// Execute nanokernel interrupt routine (this will activate the 68k emulator) |
1103 |
|
DisableInterrupt(); |
746 |
– |
cpu_push(interrupt_cpu); |
1104 |
|
if (ROMType == ROMTYPE_NEWWORLD) |
1105 |
< |
current_cpu->interrupt(ROM_BASE + 0x312b1c); |
1105 |
> |
ppc_cpu->interrupt(ROM_BASE + 0x312b1c); |
1106 |
|
else |
1107 |
< |
current_cpu->interrupt(ROM_BASE + 0x312a3c); |
751 |
< |
cpu_pop(); |
1107 |
> |
ppc_cpu->interrupt(ROM_BASE + 0x312a3c); |
1108 |
|
} |
1109 |
|
break; |
1110 |
|
#endif |
1113 |
|
case MODE_EMUL_OP: |
1114 |
|
// 68k emulator active, within EMUL_OP routine, execute 68k interrupt routine directly when interrupt level is 0 |
1115 |
|
if ((ReadMacInt32(XLM_68K_R25) & 7) == 0) { |
1116 |
+ |
interrupt_context ctx(this, "68k mode"); |
1117 |
+ |
#if EMUL_TIME_STATS |
1118 |
+ |
const clock_t interrupt_start = clock(); |
1119 |
+ |
#endif |
1120 |
|
#if 1 |
1121 |
|
// Execute full 68k interrupt routine |
1122 |
|
M68kRegisters r; |
1138 |
|
if (InterruptFlags & INTFLAG_VIA) { |
1139 |
|
ClearInterruptFlag(INTFLAG_VIA); |
1140 |
|
ADBInterrupt(); |
1141 |
< |
ExecutePPC(VideoVBL); |
1141 |
> |
ExecuteNative(NATIVE_VIDEO_VBL); |
1142 |
|
} |
1143 |
|
} |
1144 |
|
#endif |
1145 |
+ |
#if EMUL_TIME_STATS |
1146 |
+ |
interrupt_time += (clock() - interrupt_start); |
1147 |
+ |
#endif |
1148 |
|
} |
1149 |
|
break; |
1150 |
|
#endif |
1151 |
|
} |
789 |
– |
} |
790 |
– |
|
791 |
– |
/* |
792 |
– |
* Execute NATIVE_OP opcode (called by PowerPC emulator) |
793 |
– |
*/ |
794 |
– |
|
795 |
– |
#define POWERPC_NATIVE_OP_INIT(LR, OP) \ |
796 |
– |
tswap32(POWERPC_EMUL_OP | ((LR) << 11) | (((uint32)OP) << 6) | 2) |
1152 |
|
|
1153 |
< |
// FIXME: Make sure 32-bit relocations are used |
1154 |
< |
const uint32 NativeOpTable[NATIVE_OP_MAX] = { |
1155 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_PATCH_NAME_REGISTRY), |
801 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_VIDEO_INSTALL_ACCEL), |
802 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_VIDEO_VBL), |
803 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_VIDEO_DO_DRIVER_IO), |
804 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_IRQ), |
805 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_INIT), |
806 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_TERM), |
807 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_OPEN), |
808 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_CLOSE), |
809 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_WPUT), |
810 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_ETHER_RSRV), |
811 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_NOTHING), |
812 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_OPEN), |
813 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_PRIME_IN), |
814 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_PRIME_OUT), |
815 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_CONTROL), |
816 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_STATUS), |
817 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_SERIAL_CLOSE), |
818 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_GET_RESOURCE), |
819 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_GET_1_RESOURCE), |
820 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_GET_IND_RESOURCE), |
821 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_GET_1_IND_RESOURCE), |
822 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_R_GET_RESOURCE), |
823 |
< |
POWERPC_NATIVE_OP_INIT(0, NATIVE_DISABLE_INTERRUPT), |
824 |
< |
POWERPC_NATIVE_OP_INIT(0, NATIVE_ENABLE_INTERRUPT), |
825 |
< |
POWERPC_NATIVE_OP_INIT(1, NATIVE_MAKE_EXECUTABLE), |
826 |
< |
}; |
1153 |
> |
// We are done with this interrupt |
1154 |
> |
--interrupt_depth; |
1155 |
> |
} |
1156 |
|
|
1157 |
|
static void get_resource(void); |
1158 |
|
static void get_1_resource(void); |
1160 |
|
static void get_1_ind_resource(void); |
1161 |
|
static void r_get_resource(void); |
1162 |
|
|
1163 |
< |
#define GPR(REG) current_cpu->gpr(REG) |
1164 |
< |
|
836 |
< |
static void NativeOp(int selector) |
1163 |
> |
// Execute NATIVE_OP routine |
1164 |
> |
void sheepshaver_cpu::execute_native_op(uint32 selector) |
1165 |
|
{ |
1166 |
|
#if EMUL_TIME_STATS |
1167 |
|
native_exec_count++; |
1179 |
|
VideoVBL(); |
1180 |
|
break; |
1181 |
|
case NATIVE_VIDEO_DO_DRIVER_IO: |
1182 |
< |
GPR(3) = (int32)(int16)VideoDoDriverIO((void *)GPR(3), (void *)GPR(4), |
1183 |
< |
(void *)GPR(5), GPR(6), GPR(7)); |
1182 |
> |
gpr(3) = (int32)(int16)VideoDoDriverIO((void *)gpr(3), (void *)gpr(4), |
1183 |
> |
(void *)gpr(5), gpr(6), gpr(7)); |
1184 |
|
break; |
1185 |
|
#ifdef WORDS_BIGENDIAN |
1186 |
|
case NATIVE_ETHER_IRQ: |
1187 |
|
EtherIRQ(); |
1188 |
|
break; |
1189 |
|
case NATIVE_ETHER_INIT: |
1190 |
< |
GPR(3) = InitStreamModule((void *)GPR(3)); |
1190 |
> |
gpr(3) = InitStreamModule((void *)gpr(3)); |
1191 |
|
break; |
1192 |
|
case NATIVE_ETHER_TERM: |
1193 |
|
TerminateStreamModule(); |
1194 |
|
break; |
1195 |
|
case NATIVE_ETHER_OPEN: |
1196 |
< |
GPR(3) = ether_open((queue_t *)GPR(3), (void *)GPR(4), GPR(5), GPR(6), (void*)GPR(7)); |
1196 |
> |
gpr(3) = ether_open((queue_t *)gpr(3), (void *)gpr(4), gpr(5), gpr(6), (void*)gpr(7)); |
1197 |
|
break; |
1198 |
|
case NATIVE_ETHER_CLOSE: |
1199 |
< |
GPR(3) = ether_close((queue_t *)GPR(3), GPR(4), (void *)GPR(5)); |
1199 |
> |
gpr(3) = ether_close((queue_t *)gpr(3), gpr(4), (void *)gpr(5)); |
1200 |
|
break; |
1201 |
|
case NATIVE_ETHER_WPUT: |
1202 |
< |
GPR(3) = ether_wput((queue_t *)GPR(3), (mblk_t *)GPR(4)); |
1202 |
> |
gpr(3) = ether_wput((queue_t *)gpr(3), (mblk_t *)gpr(4)); |
1203 |
|
break; |
1204 |
|
case NATIVE_ETHER_RSRV: |
1205 |
< |
GPR(3) = ether_rsrv((queue_t *)GPR(3)); |
1205 |
> |
gpr(3) = ether_rsrv((queue_t *)gpr(3)); |
1206 |
|
break; |
1207 |
|
#else |
1208 |
|
case NATIVE_ETHER_INIT: |
1209 |
|
// FIXME: needs more complicated thunks |
1210 |
< |
GPR(3) = false; |
1210 |
> |
gpr(3) = false; |
1211 |
|
break; |
1212 |
|
#endif |
1213 |
+ |
case NATIVE_SYNC_HOOK: |
1214 |
+ |
gpr(3) = NQD_sync_hook(gpr(3)); |
1215 |
+ |
break; |
1216 |
+ |
case NATIVE_BITBLT_HOOK: |
1217 |
+ |
gpr(3) = NQD_bitblt_hook(gpr(3)); |
1218 |
+ |
break; |
1219 |
+ |
case NATIVE_BITBLT: |
1220 |
+ |
NQD_bitblt(gpr(3)); |
1221 |
+ |
break; |
1222 |
+ |
case NATIVE_FILLRECT_HOOK: |
1223 |
+ |
gpr(3) = NQD_fillrect_hook(gpr(3)); |
1224 |
+ |
break; |
1225 |
+ |
case NATIVE_INVRECT: |
1226 |
+ |
NQD_invrect(gpr(3)); |
1227 |
+ |
break; |
1228 |
+ |
case NATIVE_FILLRECT: |
1229 |
+ |
NQD_fillrect(gpr(3)); |
1230 |
+ |
break; |
1231 |
|
case NATIVE_SERIAL_NOTHING: |
1232 |
|
case NATIVE_SERIAL_OPEN: |
1233 |
|
case NATIVE_SERIAL_PRIME_IN: |
1245 |
|
SerialStatus, |
1246 |
|
SerialClose |
1247 |
|
}; |
1248 |
< |
GPR(3) = serial_callbacks[selector - NATIVE_SERIAL_NOTHING](GPR(3), GPR(4)); |
1248 |
> |
gpr(3) = serial_callbacks[selector - NATIVE_SERIAL_NOTHING](gpr(3), gpr(4)); |
1249 |
|
break; |
1250 |
|
} |
1251 |
|
case NATIVE_GET_RESOURCE: |
1255 |
|
case NATIVE_R_GET_RESOURCE: { |
1256 |
|
typedef void (*GetResourceCallback)(void); |
1257 |
|
static const GetResourceCallback get_resource_callbacks[] = { |
1258 |
< |
get_resource, |
1259 |
< |
get_1_resource, |
1260 |
< |
get_ind_resource, |
1261 |
< |
get_1_ind_resource, |
1262 |
< |
r_get_resource |
1258 |
> |
::get_resource, |
1259 |
> |
::get_1_resource, |
1260 |
> |
::get_ind_resource, |
1261 |
> |
::get_1_ind_resource, |
1262 |
> |
::r_get_resource |
1263 |
|
}; |
1264 |
|
get_resource_callbacks[selector - NATIVE_GET_RESOURCE](); |
1265 |
|
break; |
1266 |
|
} |
921 |
– |
case NATIVE_DISABLE_INTERRUPT: |
922 |
– |
DisableInterrupt(); |
923 |
– |
break; |
924 |
– |
case NATIVE_ENABLE_INTERRUPT: |
925 |
– |
EnableInterrupt(); |
926 |
– |
break; |
1267 |
|
case NATIVE_MAKE_EXECUTABLE: |
1268 |
< |
MakeExecutable(0, (void *)GPR(4), GPR(5)); |
1268 |
> |
MakeExecutable(0, (void *)gpr(4), gpr(5)); |
1269 |
> |
break; |
1270 |
> |
case NATIVE_CHECK_LOAD_INVOC: |
1271 |
> |
check_load_invoc(gpr(3), gpr(4), gpr(5)); |
1272 |
|
break; |
1273 |
|
default: |
1274 |
|
printf("FATAL: NATIVE_OP called with bogus selector %d\n", selector); |
1282 |
|
} |
1283 |
|
|
1284 |
|
/* |
942 |
– |
* Execute native subroutine (LR must contain return address) |
943 |
– |
*/ |
944 |
– |
|
945 |
– |
void ExecuteNative(int selector) |
946 |
– |
{ |
947 |
– |
uint32 tvect[2]; |
948 |
– |
tvect[0] = tswap32(POWERPC_NATIVE_OP_FUNC(selector)); |
949 |
– |
tvect[1] = 0; // Fake TVECT |
950 |
– |
RoutineDescriptor desc = BUILD_PPC_ROUTINE_DESCRIPTOR(0, tvect); |
951 |
– |
M68kRegisters r; |
952 |
– |
Execute68k((uint32)&desc, &r); |
953 |
– |
} |
954 |
– |
|
955 |
– |
/* |
1285 |
|
* Execute 68k subroutine (must be ended with EXEC_RETURN) |
1286 |
|
* This must only be called by the emul_thread when in EMUL_OP mode |
1287 |
|
* r->a[7] is unused, the routine runs on the caller's stack |
1289 |
|
|
1290 |
|
void Execute68k(uint32 pc, M68kRegisters *r) |
1291 |
|
{ |
1292 |
< |
current_cpu->execute_68k(pc, r); |
1292 |
> |
ppc_cpu->execute_68k(pc, r); |
1293 |
|
} |
1294 |
|
|
1295 |
|
/* |
1299 |
|
|
1300 |
|
void Execute68kTrap(uint16 trap, M68kRegisters *r) |
1301 |
|
{ |
1302 |
< |
uint16 proc[2]; |
1303 |
< |
proc[0] = htons(trap); |
1304 |
< |
proc[1] = htons(M68K_RTS); |
1305 |
< |
Execute68k((uint32)proc, r); |
1302 |
> |
SheepVar proc_var(4); |
1303 |
> |
uint32 proc = proc_var.addr(); |
1304 |
> |
WriteMacInt16(proc, trap); |
1305 |
> |
WriteMacInt16(proc + 2, M68K_RTS); |
1306 |
> |
Execute68k(proc, r); |
1307 |
|
} |
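Callers are unchanged by this rewrite; the two-instruction routine (trap word followed by RTS) is simply built in Mac address space through SheepVar instead of on the host stack, so it sits at an address the 68k emulator can reach even when host and Mac address spaces differ. An illustrative call, with a made-up block size, using the register-based _NewPtrSysClear trap:

    M68kRegisters r;
    r.d[0] = 0x1000;             // requested size in bytes (illustrative)
    Execute68kTrap(0xa71e, &r);  // _NewPtrSysClear
    uint32 ptr = r.a[0];         // Mac address of the new block, 0 on failure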
1308 |
|
|
1309 |
|
/* |
1312 |
|
|
1313 |
|
uint32 call_macos(uint32 tvect) |
1314 |
|
{ |
1315 |
< |
return current_cpu->execute_macos_code(tvect, 0, NULL); |
1315 |
> |
return ppc_cpu->execute_macos_code(tvect, 0, NULL); |
1316 |
|
} |
1317 |
|
|
1318 |
|
uint32 call_macos1(uint32 tvect, uint32 arg1) |
1319 |
|
{ |
1320 |
|
const uint32 args[] = { arg1 }; |
1321 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1321 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1322 |
|
} |
1323 |
|
|
1324 |
|
uint32 call_macos2(uint32 tvect, uint32 arg1, uint32 arg2) |
1325 |
|
{ |
1326 |
|
const uint32 args[] = { arg1, arg2 }; |
1327 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1327 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1328 |
|
} |
1329 |
|
|
1330 |
|
uint32 call_macos3(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3) |
1331 |
|
{ |
1332 |
|
const uint32 args[] = { arg1, arg2, arg3 }; |
1333 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1333 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1334 |
|
} |
1335 |
|
|
1336 |
|
uint32 call_macos4(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4) |
1337 |
|
{ |
1338 |
|
const uint32 args[] = { arg1, arg2, arg3, arg4 }; |
1339 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1339 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1340 |
|
} |
1341 |
|
|
1342 |
|
uint32 call_macos5(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5) |
1343 |
|
{ |
1344 |
|
const uint32 args[] = { arg1, arg2, arg3, arg4, arg5 }; |
1345 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1345 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1346 |
|
} |
1347 |
|
|
1348 |
|
uint32 call_macos6(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6) |
1349 |
|
{ |
1350 |
|
const uint32 args[] = { arg1, arg2, arg3, arg4, arg5, arg6 }; |
1351 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1351 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1352 |
|
} |
1353 |
|
|
1354 |
|
uint32 call_macos7(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6, uint32 arg7) |
1355 |
|
{ |
1356 |
|
const uint32 args[] = { arg1, arg2, arg3, arg4, arg5, arg6, arg7 }; |
1357 |
< |
return current_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1357 |
> |
return ppc_cpu->execute_macos_code(tvect, sizeof(args)/sizeof(args[0]), args); |
1358 |
|
} |
1359 |
|
|
1360 |
|
/* |
1363 |
|
|
1364 |
|
void get_resource(void) |
1365 |
|
{ |
1366 |
< |
current_cpu->get_resource(ReadMacInt32(XLM_GET_RESOURCE)); |
1366 |
> |
ppc_cpu->get_resource(ReadMacInt32(XLM_GET_RESOURCE)); |
1367 |
|
} |
1368 |
|
|
1369 |
|
void get_1_resource(void) |
1370 |
|
{ |
1371 |
< |
current_cpu->get_resource(ReadMacInt32(XLM_GET_1_RESOURCE)); |
1371 |
> |
ppc_cpu->get_resource(ReadMacInt32(XLM_GET_1_RESOURCE)); |
1372 |
|
} |
1373 |
|
|
1374 |
|
void get_ind_resource(void) |
1375 |
|
{ |
1376 |
< |
current_cpu->get_resource(ReadMacInt32(XLM_GET_IND_RESOURCE)); |
1376 |
> |
ppc_cpu->get_resource(ReadMacInt32(XLM_GET_IND_RESOURCE)); |
1377 |
|
} |
1378 |
|
|
1379 |
|
void get_1_ind_resource(void) |
1380 |
|
{ |
1381 |
< |
current_cpu->get_resource(ReadMacInt32(XLM_GET_1_IND_RESOURCE)); |
1381 |
> |
ppc_cpu->get_resource(ReadMacInt32(XLM_GET_1_IND_RESOURCE)); |
1382 |
|
} |
1383 |
|
|
1384 |
|
void r_get_resource(void) |
1385 |
|
{ |
1386 |
< |
current_cpu->get_resource(ReadMacInt32(XLM_R_GET_RESOURCE)); |
1386 |
> |
ppc_cpu->get_resource(ReadMacInt32(XLM_R_GET_RESOURCE)); |
1387 |
|
} |