   1    /*
   2    * sheepshaver_glue.cpp - Glue Kheperix CPU to SheepShaver CPU engine interface
   3    *
   4 <  * SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
   4 >  * SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
   5    *
   6    * This program is free software; you can redistribute it and/or modify
   7    * it under the terms of the GNU General Public License as published by
  38    #include "name_registry.h"
  39    #include "serial.h"
  40    #include "ether.h"
  41 +  #include "timer.h"
  42
  43    #include <stdio.h>
  44 +  #include <stdlib.h>
  45
  46    #if ENABLE_MON
  47    #include "mon.h"
  75    #endif
  76    }
  77
  78 +  // From main_*.cpp
  79 +  extern uintptr SignalStackBase();
  80 +
  81 +  // From rsrc_patches.cpp
  82 +  extern "C" void check_load_invoc(uint32 type, int16 id, uint32 h);
  83 +
  84    // PowerPC EmulOp to exit from emulation loop
  85    const uint32 POWERPC_EXEC_RETURN = POWERPC_EMUL_OP | 1;
  86
  87    // Enable multicore (main/interrupts) cpu emulation?
  88    #define MULTICORE_CPU (ASYNC_IRQ ? 1 : 0)
  89
  90 +  // Enable interrupt routine safety checks?
  91 +  #define SAFE_INTERRUPT_PPC 1
  92 +
  93    // Enable Execute68k() safety checks?
  94    #define SAFE_EXEC_68K 1
  95
 102    // Interrupts in native mode?
 103    #define INTERRUPTS_IN_NATIVE_MODE 1
 104
 105 +  // Enable native EMUL_OPs to be run without a mode switch
 106 +  #define ENABLE_NATIVE_EMUL_OP 1
 107 +
 108    // Pointer to Kernel Data
 109    static KernelData * const kernel_data = (KernelData *)KERNEL_DATA_BASE;
 110
 111    // SIGSEGV handler
 112    static sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t);
 113
 114 +  #if PPC_ENABLE_JIT && PPC_REENTRANT_JIT
 115 +  // Special trampolines for EmulOp and NativeOp
 116 +  static uint8 *emul_op_trampoline;
 117 +  static uint8 *native_op_trampoline;
 118 +  #endif
 119 +
 120    // JIT Compiler enabled?
 121    static inline bool enable_jit_p()
 122    {
 139    void init_decoder();
 140    void execute_sheep(uint32 opcode);
 141
 142 +  // Filter out EMUL_OP routines that only call native code
 143 +  bool filter_execute_emul_op(uint32 emul_op);
 144 +
 145 +  // "Native" EMUL_OP routines
 146 +  void execute_emul_op_microseconds();
 147 +  void execute_emul_op_idle_time_1();
 148 +  void execute_emul_op_idle_time_2();
 149 +
 150    public:
 151
 152    // Constructor
 153    sheepshaver_cpu();
 154
 155 <  // Condition Register accessors
 155 >  // CR & XER accessors
 156    uint32 get_cr() const { return cr().get(); }
 157    void set_cr(uint32 v) { cr().set(v); }
 158 +  uint32 get_xer() const { return xer().get(); }
 159 +  void set_xer(uint32 v) { xer().set(v); }
 160 +
 161 +  // Execute NATIVE_OP routine
 162 +  void execute_native_op(uint32 native_op);
 163 +
 164 +  // Execute EMUL_OP routine
 165 +  void execute_emul_op(uint32 emul_op);
 166
 167    // Execute 68k routine
 168    void execute_68k(uint32 entry, M68kRegisters *r);
 173    // Execute MacOS/PPC code
 174    uint32 execute_macos_code(uint32 tvect, int nargs, uint32 const *args);
 175
 176 +  // Compile one instruction
 177 +  virtual int compile1(codegen_context_t & cg_context);
 178 +
 179    // Resource manager thunk
 180    void get_resource(uint32 old_get_resource);
 181
 183    void interrupt(uint32 entry);
 184    void handle_interrupt();
 185
 147 -  // Lazy memory allocator (one item at a time)
 148 -  void *operator new(size_t size)
 149 -  { return allocator_helper< sheepshaver_cpu, lazy_allocator >::allocate(); }
 150 -  void operator delete(void *p)
 151 -  { allocator_helper< sheepshaver_cpu, lazy_allocator >::deallocate(p); }
 152 -  // FIXME: really make sure array allocation fails at link time?
 153 -  void *operator new[](size_t);
 154 -  void operator delete[](void *p);
 155 -
 186    // Make sure the SIGSEGV handler can access CPU registers
 187    friend sigsegv_return_t sigsegv_handler(sigsegv_address_t, sigsegv_address_t);
 188    };
 189
 190 <  lazy_allocator< sheepshaver_cpu > allocator_helper< sheepshaver_cpu, lazy_allocator >::allocator;
 190 >  // Memory allocator returning areas aligned on 16-byte boundaries
 191 >  void *operator new(size_t size)
 192 >  {
 193 >  void *p;
 194 >
 195 >  #if defined(HAVE_POSIX_MEMALIGN)
 196 >  if (posix_memalign(&p, 16, size) != 0)
 197 >  throw std::bad_alloc();
 198 >  #elif defined(HAVE_MEMALIGN)
 199 >  p = memalign(16, size);
 200 >  #elif defined(HAVE_VALLOC)
 201 >  p = valloc(size); // page-aligned!
 202 >  #else
 203 >  /* XXX: handle padding ourselves */
 204 >  p = malloc(size);
 205 >  #endif
 206 >
 207 >  return p;
 208 >  }
 209 >
 210 >  void operator delete(void *p)
 211 >  {
 212 >  #if defined(HAVE_MEMALIGN) || defined(HAVE_VALLOC)
 213 >  #if defined(__GLIBC__)
 214 >  // this is known to work only with GNU libc
 215 >  free(p);
 216 >  #endif
 217 >  #else
 218 >  free(p);
 219 >  #endif
 220 >  }
 221
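A note on the #else branch above: plain malloc() does not guarantee 16-byte alignment on every platform, which is what the "XXX: handle padding ourselves" comment refers to. A minimal sketch of the usual over-allocation technique, shown for illustration only (aligned_alloc_16 and aligned_free_16 are hypothetical helpers, not part of this change; <stdlib.h> and <stdint.h> assumed):

	// Over-allocate, round up to a 16-byte boundary, and stash the pointer
	// returned by malloc() just below the aligned block so it can be
	// recovered on free.
	static void *aligned_alloc_16(size_t size)
	{
		void *raw = malloc(size + 16 + sizeof(void *));
		if (raw == NULL)
			return NULL;
		uintptr_t p = ((uintptr_t)raw + sizeof(void *) + 15) & ~(uintptr_t)15;
		((void **)p)[-1] = raw;
		return (void *)p;
	}

	static void aligned_free_16(void *p)
	{
		if (p)
			free(((void **)p)[-1]);
	}
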
 222    sheepshaver_cpu::sheepshaver_cpu()
 223    : powerpc_cpu(enable_jit_p())
 227
 228    void sheepshaver_cpu::init_decoder()
 229    {
 170 -  #ifndef PPC_NO_STATIC_II_INDEX_TABLE
 171 -  static bool initialized = false;
 172 -  if (initialized)
 173 -  return;
 174 -  initialized = true;
 175 -  #endif
 176 -
 230    static const instr_info_t sheep_ii_table[] = {
 231    { "sheep",
 232    (execute_pmf)&sheepshaver_cpu::execute_sheep,
 245    }
 246    }
 247
 195 -  // Forward declaration for native opcode handler
 196 -  static void NativeOp(int selector);
 197 -
 248    /* NativeOp instruction format:
 249 <  +------------+--------------------------+--+----------+------------+
 250 <  |      6     |                          |FN|    OP    |      2     |
 251 <  +------------+--------------------------+--+----------+------------+
 252 <   0         5 |6                       19 20 21      25 26        31
 249 >  +------------+-------------------------+--+-----------+------------+
 250 >  |      6     |                         |FN|    OP     |      2     |
 251 >  +------------+-------------------------+--+-----------+------------+
 252 >   0         5 |6                      18 19 20       25 26        31
 253    */
 254
 255 <  typedef bit_field< 20, 20 > FN_field;
 256 <  typedef bit_field< 21, 25 > NATIVE_OP_field;
 255 >  typedef bit_field< 19, 19 > FN_field;
 256 >  typedef bit_field< 20, 25 > NATIVE_OP_field;
 257    typedef bit_field< 26, 31 > EMUL_OP_field;
 258
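The bit positions in the diagram and in the bit_field<> arguments use PowerPC big-endian bit numbering, where bit 0 is the most significant bit of the 32-bit word; that reading matches the "opcode & 0x3f" dispatch on the low six bits (EMUL_OP_field, bits 26-31) used below. A sketch of the equivalent shift-and-mask extraction, under that assumption (ppc_bits is a hypothetical helper):

	// Extract bits [first..last] with bit 0 = MSB of a 32-bit word
	static inline uint32 ppc_bits(uint32 opcode, int first, int last)
	{
		return (opcode >> (31 - last)) & ((1U << (last - first + 1)) - 1);
	}
	// New layout: ppc_bits(op, 19, 19) is the FN flag,
	//             ppc_bits(op, 20, 25) is the NativeOp selector,
	//             ppc_bits(op, 26, 31) is the low 6-bit field driving the dispatch.
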
 259 +  // "Native" EMUL_OP routines
 260 +  #define GPR_A(REG) gpr(16 + (REG))
 261 +  #define GPR_D(REG) gpr( 8 + (REG))
 262 +
 263 +  void sheepshaver_cpu::execute_emul_op_microseconds()
 264 +  {
 265 +  Microseconds(GPR_A(0), GPR_D(0));
 266 +  }
 267 +
 268 +  void sheepshaver_cpu::execute_emul_op_idle_time_1()
 269 +  {
 270 +  // Sleep if no events pending
 271 +  if (ReadMacInt32(0x14c) == 0)
 272 +  Delay_usec(16667);
 273 +  GPR_A(0) = ReadMacInt32(0x2b6);
 274 +  }
 275 +
 276 +  void sheepshaver_cpu::execute_emul_op_idle_time_2()
 277 +  {
 278 +  // Sleep if no events pending
 279 +  if (ReadMacInt32(0x14c) == 0)
 280 +  Delay_usec(16667);
 281 +  GPR_D(0) = (uint32)-2;
 282 +  }
 283 +
 284 +  // Filter out EMUL_OP routines that only call native code
 285 +  bool sheepshaver_cpu::filter_execute_emul_op(uint32 emul_op)
 286 +  {
 287 +  switch (emul_op) {
 288 +  case OP_MICROSECONDS:
 289 +  execute_emul_op_microseconds();
 290 +  return true;
 291 +  case OP_IDLE_TIME:
 292 +  execute_emul_op_idle_time_1();
 293 +  return true;
 294 +  case OP_IDLE_TIME_2:
 295 +  execute_emul_op_idle_time_2();
 296 +  return true;
 297 +  }
 298 +  return false;
 299 +  }
 300 +
 301 +  // Execute EMUL_OP routine
 302 +  void sheepshaver_cpu::execute_emul_op(uint32 emul_op)
 303 +  {
 304 +  #if ENABLE_NATIVE_EMUL_OP
 305 +  // First, filter out EMUL_OPs that can be executed without a mode switch
 306 +  if (filter_execute_emul_op(emul_op))
 307 +  return;
 308 +  #endif
 309 +
 310 +  M68kRegisters r68;
 311 +  WriteMacInt32(XLM_68K_R25, gpr(25));
 312 +  WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP);
 313 +  for (int i = 0; i < 8; i++)
 314 +  r68.d[i] = gpr(8 + i);
 315 +  for (int i = 0; i < 7; i++)
 316 +  r68.a[i] = gpr(16 + i);
 317 +  r68.a[7] = gpr(1);
 318 +  uint32 saved_cr = get_cr() & CR_field<2>::mask();
 319 +  uint32 saved_xer = get_xer();
 320 +  EmulOp(&r68, gpr(24), emul_op);
 321 +  set_cr(saved_cr);
 322 +  set_xer(saved_xer);
 323 +  for (int i = 0; i < 8; i++)
 324 +  gpr(8 + i) = r68.d[i];
 325 +  for (int i = 0; i < 7; i++)
 326 +  gpr(16 + i) = r68.a[i];
 327 +  gpr(1) = r68.a[7];
 328 +  WriteMacInt32(XLM_RUN_MODE, MODE_68K);
 329 +  }
 330 +
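The GPR_A/GPR_D macros and the copy loops in execute_emul_op() spell out the 68k-to-PowerPC register convention used by this glue: D0-D7 live in r8-r15, A0-A6 in r16-r22, and A7 is carried in the PowerPC stack pointer r1. Restated as constants purely for reference (hypothetical names, not part of the patch):

	enum {
		M68K_DREG_BASE = 8,   // D0-D7 -> r8-r15  (GPR_D)
		M68K_AREG_BASE = 16,  // A0-A6 -> r16-r22 (GPR_A)
		M68K_SP_GPR    = 1    // A7    -> r1, the PowerPC stack pointer
	};
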
 331    // Execute SheepShaver instruction
 332    void sheepshaver_cpu::execute_sheep(uint32 opcode)
 333    {
 344    break;
 345
 346    case 2: // EXEC_NATIVE
 347 <  NativeOp(NATIVE_OP_field::extract(opcode));
 347 >  execute_native_op(NATIVE_OP_field::extract(opcode));
 348    if (FN_field::test(opcode))
 349    pc() = lr();
 350    else
 351    pc() += 4;
 352    break;
 353
 354 <  default: { // EMUL_OP
 355 <  M68kRegisters r68;
 234 <  WriteMacInt32(XLM_68K_R25, gpr(25));
 235 <  WriteMacInt32(XLM_RUN_MODE, MODE_EMUL_OP);
 236 <  for (int i = 0; i < 8; i++)
 237 <  r68.d[i] = gpr(8 + i);
 238 <  for (int i = 0; i < 7; i++)
 239 <  r68.a[i] = gpr(16 + i);
 240 <  r68.a[7] = gpr(1);
 241 <  EmulOp(&r68, gpr(24), EMUL_OP_field::extract(opcode) - 3);
 242 <  for (int i = 0; i < 8; i++)
 243 <  gpr(8 + i) = r68.d[i];
 244 <  for (int i = 0; i < 7; i++)
 245 <  gpr(16 + i) = r68.a[i];
 246 <  gpr(1) = r68.a[7];
 247 <  WriteMacInt32(XLM_RUN_MODE, MODE_68K);
 354 >  default: // EMUL_OP
 355 >  execute_emul_op(EMUL_OP_field::extract(opcode) - 3);
 356    pc() += 4;
 357    break;
 358    }
 359 +  }
 360 +
 361 +  // Compile one instruction
 362 +  int sheepshaver_cpu::compile1(codegen_context_t & cg_context)
 363 +  {
 364 +  #if PPC_ENABLE_JIT
 365 +  const instr_info_t *ii = cg_context.instr_info;
 366 +  if (ii->mnemo != PPC_I(SHEEP))
 367 +  return COMPILE_FAILURE;
 368 +
 369 +  int status = COMPILE_FAILURE;
 370 +  powerpc_dyngen & dg = cg_context.codegen;
 371 +  uint32 opcode = cg_context.opcode;
 372 +
 373 +  switch (opcode & 0x3f) {
 374 +  case 0: // EMUL_RETURN
 375 +  dg.gen_invoke(QuitEmulator);
 376 +  status = COMPILE_CODE_OK;
 377 +  break;
 378 +
 379 +  case 1: // EXEC_RETURN
 380 +  dg.gen_spcflags_set(SPCFLAG_CPU_EXEC_RETURN);
 381 +  // Don't check for pending interrupts, we do know we have to
 382 +  // get out of this block ASAP
 383 +  dg.gen_exec_return();
 384 +  status = COMPILE_EPILOGUE_OK;
 385 +  break;
 386 +
 387 +  case 2: { // EXEC_NATIVE
 388 +  uint32 selector = NATIVE_OP_field::extract(opcode);
 389 +  switch (selector) {
 390 +  #if !PPC_REENTRANT_JIT
 391 +  // Filter out functions that may invoke Execute68k() or
 392 +  // CallMacOS(), this would break reentrancy as they could
 393 +  // invalidate the translation cache and even overwrite
 394 +  // continuation code when we are done with them.
 395 +  case NATIVE_PATCH_NAME_REGISTRY:
 396 +  dg.gen_invoke(DoPatchNameRegistry);
 397 +  status = COMPILE_CODE_OK;
 398 +  break;
 399 +  case NATIVE_VIDEO_INSTALL_ACCEL:
 400 +  dg.gen_invoke(VideoInstallAccel);
 401 +  status = COMPILE_CODE_OK;
 402 +  break;
 403 +  case NATIVE_VIDEO_VBL:
 404 +  dg.gen_invoke(VideoVBL);
 405 +  status = COMPILE_CODE_OK;
 406 +  break;
 407 +  case NATIVE_GET_RESOURCE:
 408 +  case NATIVE_GET_1_RESOURCE:
 409 +  case NATIVE_GET_IND_RESOURCE:
 410 +  case NATIVE_GET_1_IND_RESOURCE:
 411 +  case NATIVE_R_GET_RESOURCE: {
 412 +  static const uint32 get_resource_ptr[] = {
 413 +  XLM_GET_RESOURCE,
 414 +  XLM_GET_1_RESOURCE,
 415 +  XLM_GET_IND_RESOURCE,
 416 +  XLM_GET_1_IND_RESOURCE,
 417 +  XLM_R_GET_RESOURCE
 418 +  };
 419 +  uint32 old_get_resource = ReadMacInt32(get_resource_ptr[selector - NATIVE_GET_RESOURCE]);
 420 +  typedef void (*func_t)(dyngen_cpu_base, uint32);
 421 +  func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::get_resource).ptr();
 422 +  dg.gen_invoke_CPU_im(func, old_get_resource);
 423 +  status = COMPILE_CODE_OK;
 424 +  break;
 425 +  }
 426 +  case NATIVE_CHECK_LOAD_INVOC:
 427 +  dg.gen_load_T0_GPR(3);
 428 +  dg.gen_load_T1_GPR(4);
 429 +  dg.gen_se_16_32_T1();
 430 +  dg.gen_load_T2_GPR(5);
 431 +  dg.gen_invoke_T0_T1_T2((void (*)(uint32, uint32, uint32))check_load_invoc);
 432 +  status = COMPILE_CODE_OK;
 433 +  break;
 434 +  #endif
 435 +  case NATIVE_DISABLE_INTERRUPT:
 436 +  dg.gen_invoke(DisableInterrupt);
 437 +  status = COMPILE_CODE_OK;
 438 +  break;
 439 +  case NATIVE_ENABLE_INTERRUPT:
 440 +  dg.gen_invoke(EnableInterrupt);
 441 +  status = COMPILE_CODE_OK;
 442 +  break;
 443 +  case NATIVE_BITBLT:
 444 +  dg.gen_load_T0_GPR(3);
 445 +  dg.gen_invoke_T0((void (*)(uint32))NQD_bitblt);
 446 +  status = COMPILE_CODE_OK;
 447 +  break;
 448 +  case NATIVE_INVRECT:
 449 +  dg.gen_load_T0_GPR(3);
 450 +  dg.gen_invoke_T0((void (*)(uint32))NQD_invrect);
 451 +  status = COMPILE_CODE_OK;
 452 +  break;
 453 +  case NATIVE_FILLRECT:
 454 +  dg.gen_load_T0_GPR(3);
 455 +  dg.gen_invoke_T0((void (*)(uint32))NQD_fillrect);
 456 +  status = COMPILE_CODE_OK;
 457 +  break;
 458 +  }
 459 +  // Could we fully translate this NativeOp?
 460 +  if (FN_field::test(opcode)) {
 461 +  if (status != COMPILE_FAILURE) {
 462 +  dg.gen_load_A0_LR();
 463 +  dg.gen_set_PC_A0();
 464 +  }
 465 +  cg_context.done_compile = true;
 466 +  break;
 467 +  }
 468 +  else if (status != COMPILE_FAILURE) {
 469 +  cg_context.done_compile = false;
 470 +  break;
 471 +  }
 472 +  #if PPC_REENTRANT_JIT
 473 +  // Try to execute NativeOp trampoline
 474 +  dg.gen_set_PC_im(cg_context.pc + 4);
 475 +  dg.gen_mov_32_T0_im(selector);
 476 +  dg.gen_jmp(native_op_trampoline);
 477 +  cg_context.done_compile = true;
 478 +  status = COMPILE_EPILOGUE_OK;
 479 +  break;
 480 +  #endif
 481 +  // Invoke NativeOp handler
 482 +  typedef void (*func_t)(dyngen_cpu_base, uint32);
 483 +  func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_native_op).ptr();
 484 +  dg.gen_invoke_CPU_im(func, selector);
 485 +  cg_context.done_compile = false;
 486 +  status = COMPILE_CODE_OK;
 487 +  break;
 488 +  }
 489 +
 490 +  default: { // EMUL_OP
 491 +  uint32 emul_op = EMUL_OP_field::extract(opcode) - 3;
 492 +  #if ENABLE_NATIVE_EMUL_OP
 493 +  typedef void (*emul_op_func_t)(dyngen_cpu_base);
 494 +  emul_op_func_t emul_op_func = 0;
 495 +  switch (emul_op) {
 496 +  case OP_MICROSECONDS:
 497 +  emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_microseconds).ptr();
 498 +  break;
 499 +  case OP_IDLE_TIME:
 500 +  emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_1).ptr();
 501 +  break;
 502 +  case OP_IDLE_TIME_2:
 503 +  emul_op_func = (emul_op_func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op_idle_time_2).ptr();
 504 +  break;
 505 +  }
 506 +  if (emul_op_func) {
 507 +  dg.gen_invoke_CPU(emul_op_func);
 508 +  cg_context.done_compile = false;
 509 +  status = COMPILE_CODE_OK;
 510 +  break;
 511 +  }
 512 +  #endif
 513 +  #if PPC_REENTRANT_JIT
 514 +  // Try to execute EmulOp trampoline
 515 +  dg.gen_set_PC_im(cg_context.pc + 4);
 516 +  dg.gen_mov_32_T0_im(emul_op);
 517 +  dg.gen_jmp(emul_op_trampoline);
 518 +  cg_context.done_compile = true;
 519 +  status = COMPILE_EPILOGUE_OK;
 520 +  break;
 521 +  #endif
 522 +  // Invoke EmulOp handler
 523 +  typedef void (*func_t)(dyngen_cpu_base, uint32);
 524 +  func_t func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op).ptr();
 525 +  dg.gen_invoke_CPU_im(func, emul_op);
 526 +  cg_context.done_compile = false;
 527 +  status = COMPILE_CODE_OK;
 528 +  break;
 529    }
 530 +  }
 531 +  return status;
 532 +  #endif
 533 +  return COMPILE_FAILURE;
 534    }
 535
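compile1() repeatedly casts nv_mem_fun(&sheepshaver_cpu::...).ptr() to a plain function pointer whose first argument is the CPU object, so that generated code can call a C++ member function through a C-style calling convention. Assuming dyngen_cpu_base is (or converts to) a pointer to the CPU instance, an explicit static thunk would be the portable equivalent; the helper below is hypothetical and only illustrates the convention:

	// What the generated call effectively does for the EMUL_OP case
	static void call_execute_emul_op(dyngen_cpu_base cpu, uint32 emul_op)
	{
		((sheepshaver_cpu *)cpu)->execute_emul_op(emul_op);
	}
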
 536    // Handle MacOS interrupt
 541    const clock_t interrupt_start = clock();
 542    #endif
 543
 544 +  #if SAFE_INTERRUPT_PPC
 545 +  static int depth = 0;
 546 +  if (depth != 0)
 547 +  printf("FATAL: sheepshaver_cpu::interrupt() called more than once: %d\n", depth);
 548 +  depth++;
 549 +  #endif
 550 +  #if SAFE_INTERRUPT_PPC >= 2
 551 +  uint32 saved_regs[32];
 552 +  memcpy(&saved_regs[0], &gpr(0), sizeof(saved_regs));
 553 +  #endif
 554 +
 555    #if !MULTICORE_CPU
 556    // Save program counters and branch registers
 557    uint32 saved_pc = pc();
 561    #endif
 562
 563    // Initialize stack pointer to SheepShaver alternate stack base
 564 <  SheepArray<64> stack_area;
 272 <  gpr(1) = stack_area.addr();
 564 >  gpr(1) = SignalStackBase() - 64;
 565
 566    // Build trampoline to return from interrupt
 567    SheepVar32 trampoline = POWERPC_EXEC_RETURN;
 609    #if EMUL_TIME_STATS
 610    interrupt_time += (clock() - interrupt_start);
 611    #endif
 612 +
 613 +  #if SAFE_INTERRUPT_PPC >= 2
 614 +  if (memcmp(&saved_regs[0], &gpr(0), sizeof(saved_regs)) != 0)
 615 +  printf("FATAL: dirty PowerPC registers\n");
 616 +  #endif
 617 +  #if SAFE_INTERRUPT_PPC
 618 +  depth--;
 619 +  #endif
 620    }
 621
 622    // Execute 68k routine
 785    }
 786
 787    // Resource Manager thunk
 488 -  extern "C" void check_load_invoc(uint32 type, int16 id, uint32 h);
 489 -
 788    inline void sheepshaver_cpu::get_resource(uint32 old_get_resource)
 789    {
 790    uint32 type = gpr(3);
 894    else if (pc == ROM_BASE + 0x4a10a0 && (cpu->gpr(20) == 0xf3012002 || cpu->gpr(20) == 0xf3012000))
 895    return SIGSEGV_RETURN_SKIP_INSTRUCTION;
 896
 897 +  // Ignore writes to the zero page
 898 +  else if ((uint32)(addr - SheepMem::ZeroPage()) < (uint32)SheepMem::PageSize())
 899 +  return SIGSEGV_RETURN_SKIP_INSTRUCTION;
 900 +
 901    // Ignore all other faults, if requested
 902    if (PrefsFindBool("ignoresegv"))
 903    return SIGSEGV_RETURN_SKIP_INSTRUCTION;
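
The zero-page test added above relies on unsigned wrap-around to do both bounds checks in one comparison: when addr is below ZeroPage(), the subtraction wraps to a large value and the comparison fails. An equivalent sketch (in_zero_page is a hypothetical helper):

	// Equivalent to: addr >= zero_page && addr < zero_page + page_size
	static inline bool in_zero_page(uint32 addr, uint32 zero_page, uint32 page_size)
	{
		return (uint32)(addr - zero_page) < page_size;
	}
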
 923    // Initialize main CPU emulator
 924    main_cpu = new sheepshaver_cpu();
 925    main_cpu->set_register(powerpc_registers::GPR(3), any_register((uint32)ROM_BASE + 0x30d000));
 926 +  main_cpu->set_register(powerpc_registers::GPR(4), any_register(KernelDataAddr + 0x1000));
 927    WriteMacInt32(XLM_RUN_MODE, MODE_68K);
 928
 929    #if MULTICORE_CPU
 981    #endif
 982    }
 983
 984 +  #if PPC_ENABLE_JIT && PPC_REENTRANT_JIT
 985 +  // Initialize EmulOp trampolines
 986 +  void init_emul_op_trampolines(basic_dyngen & dg)
 987 +  {
 988 +  typedef void (*func_t)(dyngen_cpu_base, uint32);
 989 +  func_t func;
 990 +
 991 +  // EmulOp
 992 +  emul_op_trampoline = dg.gen_start();
 993 +  func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_emul_op).ptr();
 994 +  dg.gen_invoke_CPU_T0(func);
 995 +  dg.gen_exec_return();
 996 +  dg.gen_end();
 997 +
 998 +  // NativeOp
 999 +  native_op_trampoline = dg.gen_start();
1000 +  func = (func_t)nv_mem_fun(&sheepshaver_cpu::execute_native_op).ptr();
1001 +  dg.gen_invoke_CPU_T0(func);
1002 +  dg.gen_exec_return();
1003 +  dg.gen_end();
1004 +
1005 +  D(bug("EmulOp trampoline: %p\n", emul_op_trampoline));
1006 +  D(bug("NativeOp trampoline: %p\n", native_op_trampoline));
1007 +  }
1008 +  #endif
1009 +
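The trampolines give translated blocks a single, reentrancy-safe way to call back into C++: the block leaves the selector in the T0 temporary, jumps to the trampoline, and the trampoline invokes the handler on the current CPU before returning to the run loop. Conceptually (this is a sketch of the behaviour, not actual dyngen output):

	// Behaviour of the NativeOp trampoline, expressed as plain C++
	static void native_op_trampoline_body(sheepshaver_cpu *cpu, uint32 t0_selector)
	{
		cpu->execute_native_op(t0_selector);
		// gen_exec_return(): leave translated code and re-enter the run
		// loop so pending spcflags can be serviced
	}
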
1010    /*
1011    * Emulation loop
1012    */
1014    void emul_ppc(uint32 entry)
1015    {
1016    current_cpu = main_cpu;
1017 <  #if DEBUG
1017 >  #if 0
1018    current_cpu->start_log();
1019    #endif
1020    // start emulation loop and enable code translation or caching
1113    if (InterruptFlags & INTFLAG_VIA) {
1114    ClearInterruptFlag(INTFLAG_VIA);
1115    ADBInterrupt();
1116 <  ExecutePPC(VideoVBL);
1116 >  ExecuteNative(NATIVE_VIDEO_VBL);
1117    }
1118    }
1119    #endif
1129    static void get_1_ind_resource(void);
1130    static void r_get_resource(void);
1131
1132 <  #define GPR(REG) current_cpu->gpr(REG)
1133 <
 805 <  static void NativeOp(int selector)
1132 >  // Execute NATIVE_OP routine
1133 >  void sheepshaver_cpu::execute_native_op(uint32 selector)
1134    {
1135    #if EMUL_TIME_STATS
1136    native_exec_count++;
1148    VideoVBL();
1149    break;
1150    case NATIVE_VIDEO_DO_DRIVER_IO:
1151 <  GPR(3) = (int32)(int16)VideoDoDriverIO((void *)GPR(3), (void *)GPR(4),
1152 <  (void *)GPR(5), GPR(6), GPR(7));
1151 >  gpr(3) = (int32)(int16)VideoDoDriverIO((void *)gpr(3), (void *)gpr(4),
1152 >  (void *)gpr(5), gpr(6), gpr(7));
1153    break;
1154    #ifdef WORDS_BIGENDIAN
1155    case NATIVE_ETHER_IRQ:
1156    EtherIRQ();
1157    break;
1158    case NATIVE_ETHER_INIT:
1159 <  GPR(3) = InitStreamModule((void *)GPR(3));
1159 >  gpr(3) = InitStreamModule((void *)gpr(3));
1160    break;
1161    case NATIVE_ETHER_TERM:
1162    TerminateStreamModule();
1163    break;
1164    case NATIVE_ETHER_OPEN:
1165 <  GPR(3) = ether_open((queue_t *)GPR(3), (void *)GPR(4), GPR(5), GPR(6), (void*)GPR(7));
1165 >  gpr(3) = ether_open((queue_t *)gpr(3), (void *)gpr(4), gpr(5), gpr(6), (void*)gpr(7));
1166    break;
1167    case NATIVE_ETHER_CLOSE:
1168 <  GPR(3) = ether_close((queue_t *)GPR(3), GPR(4), (void *)GPR(5));
1168 >  gpr(3) = ether_close((queue_t *)gpr(3), gpr(4), (void *)gpr(5));
1169    break;
1170    case NATIVE_ETHER_WPUT:
1171 <  GPR(3) = ether_wput((queue_t *)GPR(3), (mblk_t *)GPR(4));
1171 >  gpr(3) = ether_wput((queue_t *)gpr(3), (mblk_t *)gpr(4));
1172    break;
1173    case NATIVE_ETHER_RSRV:
1174 <  GPR(3) = ether_rsrv((queue_t *)GPR(3));
1174 >  gpr(3) = ether_rsrv((queue_t *)gpr(3));
1175    break;
1176    #else
1177    case NATIVE_ETHER_INIT:
1178    // FIXME: needs more complicated thunks
1179 <  GPR(3) = false;
1179 >  gpr(3) = false;
1180    break;
1181    #endif
1182 +  case NATIVE_SYNC_HOOK:
1183 +  gpr(3) = NQD_sync_hook(gpr(3));
1184 +  break;
1185 +  case NATIVE_BITBLT_HOOK:
1186 +  gpr(3) = NQD_bitblt_hook(gpr(3));
1187 +  break;
1188 +  case NATIVE_BITBLT:
1189 +  NQD_bitblt(gpr(3));
1190 +  break;
1191 +  case NATIVE_FILLRECT_HOOK:
1192 +  gpr(3) = NQD_fillrect_hook(gpr(3));
1193 +  break;
1194 +  case NATIVE_INVRECT:
1195 +  NQD_invrect(gpr(3));
1196 +  break;
1197 +  case NATIVE_FILLRECT:
1198 +  NQD_fillrect(gpr(3));
1199 +  break;
1200    case NATIVE_SERIAL_NOTHING:
1201    case NATIVE_SERIAL_OPEN:
1202    case NATIVE_SERIAL_PRIME_IN:
1214    SerialStatus,
1215    SerialClose
1216    };
1217 <  GPR(3) = serial_callbacks[selector - NATIVE_SERIAL_NOTHING](GPR(3), GPR(4));
1217 >  gpr(3) = serial_callbacks[selector - NATIVE_SERIAL_NOTHING](gpr(3), gpr(4));
1218    break;
1219    }
1220    case NATIVE_GET_RESOURCE:
1224    case NATIVE_R_GET_RESOURCE: {
1225    typedef void (*GetResourceCallback)(void);
1226    static const GetResourceCallback get_resource_callbacks[] = {
1227 <  get_resource,
1228 <  get_1_resource,
1229 <  get_ind_resource,
1230 <  get_1_ind_resource,
1231 <  r_get_resource
1227 >  ::get_resource,
1228 >  ::get_1_resource,
1229 >  ::get_ind_resource,
1230 >  ::get_1_ind_resource,
1231 >  ::r_get_resource
1232    };
1233    get_resource_callbacks[selector - NATIVE_GET_RESOURCE]();
1234    break;
1240    EnableInterrupt();
1241    break;
1242    case NATIVE_MAKE_EXECUTABLE:
1243 <  MakeExecutable(0, (void *)GPR(4), GPR(5));
1243 >  MakeExecutable(0, (void *)gpr(4), gpr(5));
1244 >  break;
1245 >  case NATIVE_CHECK_LOAD_INVOC:
1246 >  check_load_invoc(gpr(3), gpr(4), gpr(5));
1247    break;
1248    default:
1249    printf("FATAL: NATIVE_OP called with bogus selector %d\n", selector);
1257    }
1258
1259    /*
 911 -  * Execute native subroutine (LR must contain return address)
 912 -  */
 913 -
 914 -  void ExecuteNative(int selector)
 915 -  {
 916 -  SheepRoutineDescriptor desc(0, NativeTVECT(selector));
 917 -  M68kRegisters r;
 918 -  Execute68k(desc.addr(), &r);
 919 -  }
 920 -
 921 -  /*
1260    * Execute 68k subroutine (must be ended with EXEC_RETURN)
1261    * This must only be called by the emul_thread when in EMUL_OP mode
1262    * r->a[7] is unused, the routine runs on the caller's stack