root/cebix/SheepShaver/src/kpx_cpu/sheepshaver_glue.cpp

Comparing SheepShaver/src/kpx_cpu/sheepshaver_glue.cpp (file contents):
Revision 1.6 by gbeauche, 2003-10-11T09:33:27Z vs.
Revision 1.11 by gbeauche, 2003-10-26T14:16:39Z

# Line 44 | Line 44
44   #include "mon_disass.h"
45   #endif
46  
47 < #define DEBUG 1
47 > #define DEBUG 0
48   #include "debug.h"
49  
50   static void enter_mon(void)
# Line 57 | Line 57 | static void enter_mon(void)
57   }
58  
59   // Enable multicore (main/interrupts) cpu emulation?
60 < #define MULTICORE_CPU 0
60 > #define MULTICORE_CPU (ASYNC_IRQ ? 1 : 0)
61  
62   // Enable Execute68k() safety checks?
63   #define SAFE_EXEC_68K 1
# Line 89 | Line 89 | class sheepshaver_cpu
89  
90   public:
91  
92 <        sheepshaver_cpu()
93 <                : powerpc_cpu()
94 <                { init_decoder(); }
92 >        // Constructor
93 >        sheepshaver_cpu();
94  
95          // Condition Register accessors
96          uint32 get_cr() const           { return cr().get(); }
97          void set_cr(uint32 v)           { cr().set(v); }
98  
99          // Execution loop
100 <        void execute(uint32 pc);
100 >        void execute(uint32 entry, bool enable_cache = false);
101  
102          // Execute 68k routine
103          void execute_68k(uint32 entry, M68kRegisters *r);
# Line 114 | Line 113 | public:
113  
114          // Handle MacOS interrupt
115          void interrupt(uint32 entry);
116 +        void handle_interrupt();
117  
118          // spcflags for interrupts handling
119          static uint32 spcflags;
# Line 131 | Line 131 | public:
131   uint32 sheepshaver_cpu::spcflags = 0;
132   lazy_allocator< sheepshaver_cpu > allocator_helper< sheepshaver_cpu, lazy_allocator >::allocator;
133  
134 + sheepshaver_cpu::sheepshaver_cpu()
135 +        : powerpc_cpu()
136 + {
137 +        init_decoder();
138 + }
139 +
140   void sheepshaver_cpu::init_decoder()
141   {
142   #ifndef PPC_NO_STATIC_II_INDEX_TABLE
# Line 144 | Line 150 | void sheepshaver_cpu::init_decoder()
150                  { "sheep",
151                    (execute_fn)&sheepshaver_cpu::execute_sheep,
152                    NULL,
153 <                  D_form, 6, 0, CFLOW_TRAP
153 >                  D_form, 6, 0, CFLOW_JUMP | CFLOW_TRAP
154                  }
155          };
156  
# Line 181 | Line 187 | void sheepshaver_cpu::execute_sheep(uint
187          case 0:         // EMUL_RETURN
188                  QuitEmulator();
189                  break;
190 <                
190 >
191          case 1:         // EXEC_RETURN
192                  throw sheepshaver_exec_return();
193                  break;
# Line 216 | Line 222 | void sheepshaver_cpu::execute_sheep(uint
222          }
223   }
224  
219 // Checks for pending interrupts
220 struct execute_nothing {
221        static inline void execute(powerpc_cpu *) { }
222 };
223
224 struct execute_spcflags_check {
225        static inline void execute(powerpc_cpu *cpu) {
226 #if !ASYNC_IRQ
227                if (SPCFLAGS_TEST(SPCFLAG_ALL_BUT_EXEC_RETURN)) {
228                        if (SPCFLAGS_TEST( SPCFLAG_ENTER_MON )) {
229                                SPCFLAGS_CLEAR( SPCFLAG_ENTER_MON );
230                                enter_mon();
231                        }
232                        if (SPCFLAGS_TEST( SPCFLAG_DOINT )) {
233                                SPCFLAGS_CLEAR( SPCFLAG_DOINT );
234                                HandleInterrupt();
235                        }
236                        if (SPCFLAGS_TEST( SPCFLAG_INT )) {
237                                SPCFLAGS_CLEAR( SPCFLAG_INT );
238                                SPCFLAGS_SET( SPCFLAG_DOINT );
239                        }
240                }
241 #endif
242        }
243 };
244
225   // Execution loop
226 < void sheepshaver_cpu::execute(uint32 entry)
226 > void sheepshaver_cpu::execute(uint32 entry, bool enable_cache)
227   {
228          try {
229 <                pc() = entry;
250 <                powerpc_cpu::do_execute<execute_nothing, execute_spcflags_check>();
229 >                powerpc_cpu::execute(entry, enable_cache);
230          }
231          catch (sheepshaver_exec_return const &) {
232                  // Nothing, simply return
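
The removed execute_nothing / execute_spcflags_check helpers above were policy classes plugged into powerpc_cpu::do_execute<>, which polled the spcflags word for pending interrupts from inside the execution loop; the new code simply delegates to powerpc_cpu::execute(entry, enable_cache). The following self-contained sketch only illustrates the general policy-class shape such a loop suggests; the names and the exact hook points are guesses, not the kpx_cpu API.

    #include <cstdio>

    // Simplified stand-in for the emulator core: do_execute<> is parameterized
    // on two policy classes, one run before the loop and one run after each step
    // (hook placement here is an assumption, for illustration only).
    struct toy_core {
        unsigned pc = 0;
        bool stop = false;

        template< class pre_execute, class post_execute >
        void do_execute() {
            pre_execute::execute(this);
            while (!stop) {
                step();                      // execute one guest instruction
                post_execute::execute(this); // e.g. poll spcflags for interrupts
            }
        }
        void step() { if (++pc >= 4) stop = true; }
    };

    struct execute_nothing {
        static void execute(toy_core *) { }
    };

    struct print_pc_check {
        static void execute(toy_core *cpu) { std::printf("pc = %u\n", cpu->pc); }
    };

    int main() {
        toy_core cpu;
        cpu.do_execute<execute_nothing, print_pc_check>();
    }
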
# Line 294 | Line 273 | void sheepshaver_cpu::interrupt(uint32 e
273          gpr(8)  = 0;
274          gpr(10) = (uint32)trampoline;
275          gpr(12) = (uint32)trampoline;
276 <        gpr(13) = cr().get();
276 >        gpr(13) = get_cr();
277  
278          // rlwimi. r7,r7,8,0,0
279          uint32 result = op_ppc_rlwimi::apply(gpr(7), 8, 0x80000000, gpr(7));
# Line 302 | Line 281 | void sheepshaver_cpu::interrupt(uint32 e
281          gpr(7) = result;
282  
283          gpr(11) = 0xf072; // MSR (SRR1)
284 <        cr().set((gpr(11) & 0x0fff0000) | (cr().get() & ~0x0fff0000));
284 >        cr().set((gpr(11) & 0x0fff0000) | (get_cr() & ~0x0fff0000));
285  
286          // Enter nanokernel
287          execute(entry);
# Line 328 | Line 307 | void sheepshaver_cpu::execute_68k(uint32
307          uint32 saved_pc = pc();
308          uint32 saved_lr = lr();
309          uint32 saved_ctr= ctr();
310 +        uint32 saved_cr = get_cr();
311  
312          // Create MacOS stack frame
313          // FIXME: make sure MacOS doesn't expect PPC registers to live on top
# Line 399 | Line 379 | void sheepshaver_cpu::execute_68k(uint32
379          pc() = saved_pc;
380          lr() = saved_lr;
381          ctr()= saved_ctr;
382 +        set_cr(saved_cr);
383   }
384  
385   // Call MacOS PPC code
# Line 492 | Line 473 | static sheepshaver_cpu *main_cpu = NULL;
473   static sheepshaver_cpu *interrupt_cpu = NULL;   // CPU emulator to handle interrupts
474   static sheepshaver_cpu *current_cpu = NULL;             // Current CPU emulator context
475  
476 + void FlushCodeCache(uintptr start, uintptr end)
477 + {
478 +        D(bug("FlushCodeCache(%08x, %08x)\n", start, end));
479 +        main_cpu->invalidate_cache_range(start, end);
480 + #if MULTICORE_CPU
481 +        interrupt_cpu->invalidate_cache_range(start, end);
482 + #endif
483 + }
484 +
485   static inline void cpu_push(sheepshaver_cpu *new_cpu)
486   {
487   #if MULTICORE_CPU
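
The new FlushCodeCache() hands a guest address range to invalidate_cache_range() on the main CPU (and on the interrupt CPU when MULTICORE_CPU is set), so any cached or pre-decoded code overlapping the range is discarded after it has been patched. Below is a self-contained sketch of the overlap test such a flush implies; the block map and names are illustrative, not the kpx_cpu implementation.

    #include <cstdio>
    #include <cstdint>
    #include <map>

    // Illustrative only: guest start address -> length of a cached block of guest code.
    static std::map<uint32_t, uint32_t> cached_blocks;

    // Drop every cached block that overlaps [start, end].
    static void invalidate_cache_range(uint32_t start, uint32_t end)
    {
        for (auto it = cached_blocks.begin(); it != cached_blocks.end(); ) {
            uint32_t block_start = it->first;
            uint32_t block_end   = it->first + it->second - 1;
            if (block_start <= end && block_end >= start)
                it = cached_blocks.erase(it);   // overlaps the flushed range
            else
                ++it;
        }
    }

    int main()
    {
        cached_blocks[0x1000] = 0x100;
        cached_blocks[0x2000] = 0x100;
        invalidate_cache_range(0x1080, 0x10ff);   // hits the first block only
        std::printf("%zu block(s) left\n", cached_blocks.size());
    }
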
# Line 585 | Line 575 | void init_emul_ppc(void)
575   void emul_ppc(uint32 entry)
576   {
577          current_cpu = main_cpu;
578 + #if DEBUG
579          current_cpu->start_log();
580 <        current_cpu->execute(entry);
580 > #endif
581 >        // start emulation loop and enable code translation or caching
582 >        current_cpu->execute(entry, true);
583   }
584  
585   /*
586   *  Handle PowerPC interrupt
587   */
588  
589 < // Atomic operations
590 < extern int atomic_add(int *var, int v);
591 < extern int atomic_and(int *var, int v);
592 < extern int atomic_or(int *var, int v);
593 <
594 < #if !ASYNC_IRQ
589 > #if ASYNC_IRQ
590 > void HandleInterrupt(void)
591 > {
592 >        main_cpu->handle_interrupt();
593 > }
594 > #else
595   void TriggerInterrupt(void)
596   {
597   #if 0
598    WriteMacInt32(0x16a, ReadMacInt32(0x16a) + 1);
599   #else
600 <  SPCFLAGS_SET( SPCFLAG_INT );
600 >  // Trigger interrupt to main cpu only
601 >  if (main_cpu)
602 >          main_cpu->trigger_interrupt();
603   #endif
604   }
605   #endif
606  
607 < void HandleInterrupt(void)
607 > void sheepshaver_cpu::handle_interrupt(void)
608   {
609          // Do nothing if interrupts are disabled
610          if (int32(ReadMacInt32(XLM_IRQ_NEST)) > 0)
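
With ASYNC_IRQ disabled, TriggerInterrupt() no longer sets SPCFLAG_INT itself; it asks the main CPU object to raise the flag, and the execution loop later services it through sheepshaver_cpu::handle_interrupt(), the former global HandleInterrupt() body moved into the class below. A minimal stand-alone model of that deferred hand-off follows; trigger_interrupt(), handle_interrupt() and the flag are simplified stand-ins for the emulator's spcflags machinery.

    #include <atomic>
    #include <cstdio>

    // Minimal model: the trigger only sets a flag; the emulation loop notices
    // the flag at a safe point and runs the actual interrupt handler there.
    class toy_cpu {
        std::atomic<bool> irq_pending{false};
    public:
        void trigger_interrupt() { irq_pending = true; }   // e.g. called from another thread
        void handle_interrupt()  { std::printf("interrupt serviced\n"); }

        void execute(int steps) {
            for (int i = 0; i < steps; i++) {
                // ... execute one guest instruction ...
                if (irq_pending.exchange(false))
                    handle_interrupt();
            }
        }
    };

    int main() {
        toy_cpu cpu;
        cpu.trigger_interrupt();
        cpu.execute(3);
    }
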
# Line 628 | Line 623 | void HandleInterrupt(void)
623                  // 68k emulator active, trigger 68k interrupt level 1
624                  assert(current_cpu == main_cpu);
625                  WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1);
626 <                main_cpu->set_cr(main_cpu->get_cr() | tswap32(kernel_data->v[0x674 >> 2]));
626 >                set_cr(get_cr() | tswap32(kernel_data->v[0x674 >> 2]));
627                  break;
628      
629   #if INTERRUPTS_IN_NATIVE_MODE
630          case MODE_NATIVE:
631                  // 68k emulator inactive, in nanokernel?
632                  assert(current_cpu == main_cpu);
633 <                if (main_cpu->gpr(1) != KernelDataAddr) {
633 >                if (gpr(1) != KernelDataAddr) {
634                          // Prepare for 68k interrupt level 1
635                          WriteMacInt16(tswap32(kernel_data->v[0x67c >> 2]), 1);
636                          WriteMacInt32(tswap32(kernel_data->v[0x658 >> 2]) + 0xdc,
# Line 723 | Line 718 | const uint32 NativeOpTable[NATIVE_OP_MAX
718          POWERPC_NATIVE_OP_INIT(1, NATIVE_R_GET_RESOURCE),
719          POWERPC_NATIVE_OP_INIT(0, NATIVE_DISABLE_INTERRUPT),
720          POWERPC_NATIVE_OP_INIT(0, NATIVE_ENABLE_INTERRUPT),
721 +        POWERPC_NATIVE_OP_INIT(1, NATIVE_MAKE_EXECUTABLE),
722   };
723  
724   static void get_resource(void);
# Line 790 | Line 786 | static void NativeOp(int selector)
786          case NATIVE_ENABLE_INTERRUPT:
787                  EnableInterrupt();
788                  break;
789 +        case NATIVE_MAKE_EXECUTABLE:
790 +                MakeExecutable(0, (void *)GPR(4), GPR(5));
791 +                break;
792          default:
793                  printf("FATAL: NATIVE_OP called with bogus selector %d\n", selector);
794                  QuitEmulator();
# Line 887 | Line 886 | uint32 call_macos7(uint32 tvect, uint32
886   }
887  
888   /*
890 *  Atomic operations
891 */
892
893 int atomic_add(int *var, int v)
894 {
895        int ret = *var;
896        *var += v;
897        return ret;
898 }
899
900 int atomic_and(int *var, int v)
901 {
902        int ret = *var;
903        *var &= v;
904        return ret;
905 }
906
907 int atomic_or(int *var, int v)
908 {
909        int ret = *var;
910        *var |= v;
911        return ret;
912 }
913
914 /*
889   *  Resource Manager thunks
890   */
891  
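
The atomic_add/atomic_and/atomic_or helpers deleted in the last hunk (together with their extern declarations in the interrupt-handling section) were plain read-modify-write sequences, not true atomics. Purely as a sketch, and not part of this patch, genuinely atomic versions with the same fetch-then-op signatures could be built on the GCC/Clang __atomic builtins:

    #include <cstdio>

    // Sketch only: same signatures as the removed helpers, but actually atomic
    // (requires the __atomic builtins of GCC >= 4.7 or Clang).
    static inline int atomic_add(int *var, int v) { return __atomic_fetch_add(var, v, __ATOMIC_SEQ_CST); }
    static inline int atomic_and(int *var, int v) { return __atomic_fetch_and(var, v, __ATOMIC_SEQ_CST); }
    static inline int atomic_or (int *var, int v) { return __atomic_fetch_or (var, v, __ATOMIC_SEQ_CST); }

    int main() {
        int x = 1;
        int old = atomic_or(&x, 2);
        std::printf("old = %d, new = %d\n", old, x);   // prints: old = 1, new = 3
    }
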

Diff Legend

  Removed lines (present only in revision 1.6)
+ Added lines (present only in revision 1.11)
< Changed lines (revision 1.6 side)
> Changed lines (revision 1.11 side)