ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/sysdeps.h
(Generate patch)

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.1 by cebix, 2002-02-04T16:58:13Z vs.
Revision 1.33 by gbeauche, 2004-06-15T21:37:22Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 44 | Line 44
44   #include <string.h>
45   #include <signal.h>
46  
47 + #ifdef HAVE_PTHREADS
48 + # include <pthread.h>
49 + #endif
50 +
51   #ifdef HAVE_FCNTL_H
52   # include <fcntl.h>
53   #endif
# Line 59 | Line 63
63   # endif
64   #endif
65  
66 < // Are we using a PPC emulator or the real thing?
67 < #ifdef __powerpc__
68 < #define EMULATED_PPC 0
69 < #else
70 < #define EMULATED_PPC 1
67 < #endif
66 > // Define for external components
67 > #define SHEEPSHAVER 1
68 >
69 > // Mac and host address space are the same
70 > #define REAL_ADDRESSING 1
71  
72   #define POWERPC_ROM 1
73  
74 + #if EMULATED_PPC
75 + // Mac ROM is write protected when banked memory is used
76 + #if REAL_ADDRESSING || DIRECT_ADDRESSING
77 + # define ROM_IS_WRITE_PROTECTED 0
78 + # define USE_SCRATCHMEM_SUBTERFUGE 1
79 + #else
80 + # define ROM_IS_WRITE_PROTECTED 1
81 + #endif
82 + // Configure PowerPC emulator
83 + #define PPC_REENTRANT_JIT 1
84 + #define PPC_CHECK_INTERRUPTS 1
85 + #define PPC_DECODE_CACHE 1
86 + #define PPC_FLIGHT_RECORDER 1
87 + #define PPC_PROFILE_COMPILE_TIME 0
88 + #define PPC_PROFILE_GENERIC_CALLS 0
89 + #define KPX_MAX_CPUS 1
90 + #if ENABLE_DYNGEN
91 + // Don't bother with predecode cache when using JIT
92 + #define PPC_ENABLE_JIT 1
93 + #undef  PPC_DECODE_CACHE
94 + #endif
95 + #if defined(__i386__)
96 + #define DYNGEN_ASM_OPTS 1
97 + #endif
98 + #else
99 + // Mac ROM is write protected
100 + #define ROM_IS_WRITE_PROTECTED 1
101 + #define USE_SCRATCHMEM_SUBTERFUGE 0
102 + #endif
103 +
104   // Data types
105   typedef unsigned char uint8;
106   typedef signed char int8;
# Line 92 | Line 125 | typedef long int32;
125   #if SIZEOF_LONG == 8
126   typedef unsigned long uint64;
127   typedef long int64;
128 + #define VAL64(a) (a ## l)
129 + #define UVAL64(a) (a ## ul)
130   #elif SIZEOF_LONG_LONG == 8
131   typedef unsigned long long uint64;
132   typedef long long int64;
133 + #define VAL64(a) (a ## LL)
134 + #define UVAL64(a) (a ## uLL)
135   #else
136   #error "No 8 byte type, you lose."
137   #endif
138 + #if SIZEOF_VOID_P == 4
139 + typedef uint32 uintptr;
140 + typedef int32 intptr;
141 + #elif SIZEOF_VOID_P == 8
142 + typedef uint64 uintptr;
143 + typedef int64 intptr;
144 + #else
145 + #error "Unsupported size of pointer"
146 + #endif
147 +
148 + /**
149 + *              Helper functions to byteswap data
150 + **/
151 +
152 + #if defined(__GNUC__)
153 + #if defined(__x86_64__) || defined(__i386__)
154 + // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
155 + #define opt_bswap_32 do_opt_bswap_32
156 + static inline uint32 do_opt_bswap_32(uint32 x)
157 + {
158 +  uint32 v;
159 +  __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
160 +  return v;
161 + }
162 + #endif
163 + #endif
164 +
165 + #ifdef HAVE_BYTESWAP_H
166 + #include <byteswap.h>
167 + #endif
168 +
169 + #ifdef  opt_bswap_16
170 + #undef  bswap_16
171 + #define bswap_16 opt_bswap_16
172 + #endif
173 + #ifndef bswap_16
174 + #define bswap_16 generic_bswap_16
175 + #endif
176 +
177 + static inline uint16 generic_bswap_16(uint16 x)
178 + {
179 +  return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
180 + }
181 +
182 + #ifdef  opt_bswap_32
183 + #undef  bswap_32
184 + #define bswap_32 opt_bswap_32
185 + #endif
186 + #ifndef bswap_32
187 + #define bswap_32 generic_bswap_32
188 + #endif
189 +
190 + static inline uint32 generic_bswap_32(uint32 x)
191 + {
192 +  return (((x & 0xff000000) >> 24) |
193 +                  ((x & 0x00ff0000) >>  8) |
194 +                  ((x & 0x0000ff00) <<  8) |
195 +                  ((x & 0x000000ff) << 24) );
196 + }
197 +
198 + #if defined(__i386__)
199 + #define opt_bswap_64 do_opt_bswap_64
200 + static inline uint64 do_opt_bswap_64(uint64 x)
201 + {
202 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
203 + }
204 + #endif
205 +
206 + #ifdef  opt_bswap_64
207 + #undef  bswap_64
208 + #define bswap_64 opt_bswap_64
209 + #endif
210 + #ifndef bswap_64
211 + #define bswap_64 generic_bswap_64
212 + #endif
213 +
214 + static inline uint64 generic_bswap_64(uint64 x)
215 + {
216 +  return (((x & UVAL64(0xff00000000000000)) >> 56) |
217 +                  ((x & UVAL64(0x00ff000000000000)) >> 40) |
218 +                  ((x & UVAL64(0x0000ff0000000000)) >> 24) |
219 +                  ((x & UVAL64(0x000000ff00000000)) >>  8) |
220 +                  ((x & UVAL64(0x00000000ff000000)) <<  8) |
221 +                  ((x & UVAL64(0x0000000000ff0000)) << 24) |
222 +                  ((x & UVAL64(0x000000000000ff00)) << 40) |
223 +                  ((x & UVAL64(0x00000000000000ff)) << 56) );
224 + }
225 +
226 + #ifdef WORDS_BIGENDIAN
227 + static inline uint16 tswap16(uint16 x) { return x; }
228 + static inline uint32 tswap32(uint32 x) { return x; }
229 + static inline uint64 tswap64(uint64 x) { return x; }
230 + #else
231 + static inline uint16 tswap16(uint16 x) { return bswap_16(x); }
232 + static inline uint32 tswap32(uint32 x) { return bswap_32(x); }
233 + static inline uint64 tswap64(uint64 x) { return bswap_64(x); }
234 + #endif
235 +
236 + // spin locks
237 + #ifdef __GNUC__
238 +
239 + #if defined(__powerpc__) || defined(__ppc__)
240 + #define HAVE_TEST_AND_SET 1
241 + static inline int testandset(volatile int *p)
242 + {
243 +        int ret;
244 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
245 +                                                 "      xor.    %0,%3,%0\n"
246 +                                                 "      bne             1f\n"
247 +                                                 "      stwcx.  %2,0,%1\n"
248 +                                                 "      bne-    0b\n"
249 +                                                 "1:    "
250 +                                                 : "=&r" (ret)
251 +                                                 : "r" (p), "r" (1), "r" (0)
252 +                                                 : "cr0", "memory");
253 +        return ret;
254 + }
255 + #endif
256 +
257 + /* FIXME: SheepShaver occasionally hangs with those locks */
258 + #if 0 && (defined(__i386__) || defined(__x86_64__))
259 + #define HAVE_TEST_AND_SET 1
260 + static inline int testandset(volatile int *p)
261 + {
262 +        long int ret;
263 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
264 +        __asm__ __volatile__("xchgl %k0, %1"
265 +                                                 : "=r" (ret), "=m" (*p)
266 +                                                 : "0" (1), "m" (*p)
267 +                                                 : "memory");
268 +        return ret;
269 + }
270 + #endif
271 +
272 + #ifdef __s390__
273 + #define HAVE_TEST_AND_SET 1
274 + static inline int testandset(volatile int *p)
275 + {
276 +        int ret;
277 +
278 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
279 +                                                 "   jl    0b"
280 +                                                 : "=&d" (ret)
281 +                                                 : "r" (1), "a" (p), "0" (*p)
282 +                                                 : "cc", "memory" );
283 +        return ret;
284 + }
285 + #endif
286 +
287 + #ifdef __alpha__
288 + #define HAVE_TEST_AND_SET 1
289 + static inline int testandset(volatile int *p)
290 + {
291 +        int ret;
292 +        unsigned long one;
293 +
294 +        __asm__ __volatile__("0:        mov 1,%2\n"
295 +                                                 "      ldl_l %0,%1\n"
296 +                                                 "      stl_c %2,%1\n"
297 +                                                 "      beq %2,1f\n"
298 +                                                 ".subsection 2\n"
299 +                                                 "1:    br 0b\n"
300 +                                                 ".previous"
301 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
302 +                                                 : "m" (*p));
303 +        return ret;
304 + }
305 + #endif
306 +
307 + #ifdef __sparc__
308 + #define HAVE_TEST_AND_SET 1
309 + static inline int testandset(volatile int *p)
310 + {
311 +        int ret;
312 +
313 +        __asm__ __volatile__("ldstub    [%1], %0"
314 +                                                 : "=r" (ret)
315 +                                                 : "r" (p)
316 +                                                 : "memory");
317 +
318 +        return (ret ? 1 : 0);
319 + }
320 + #endif
321 +
322 + #ifdef __arm__
323 + #define HAVE_TEST_AND_SET 1
324 + static inline int testandset(volatile int *p)
325 + {
326 +        register unsigned int ret;
327 +        __asm__ __volatile__("swp %0, %1, [%2]"
328 +                                                 : "=r"(ret)
329 +                                                 : "0"(1), "r"(p));
330 +        
331 +        return ret;
332 + }
333 + #endif
334 +
335 + #endif /* __GNUC__ */
336 +
337 + typedef volatile int spinlock_t;
338 +
339 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
340 +
341 + #if HAVE_TEST_AND_SET
342 + #define HAVE_SPINLOCKS 1
343 + static inline void spin_lock(spinlock_t *lock)
344 + {
345 +        while (testandset(lock));
346 + }
347 +
348 + static inline void spin_unlock(spinlock_t *lock)
349 + {
350 +        *lock = 0;
351 + }
352 +
353 + static inline int spin_trylock(spinlock_t *lock)
354 + {
355 +        return !testandset(lock);
356 + }
357 + #else
358 + static inline void spin_lock(spinlock_t *lock)
359 + {
360 + }
361 +
362 + static inline void spin_unlock(spinlock_t *lock)
363 + {
364 + }
365 +
366 + static inline int spin_trylock(spinlock_t *lock)
367 + {
368 +        return 1;
369 + }
370 + #endif
371  
372   // Time data type for Time Manager emulation
373   #ifdef HAVE_CLOCK_GETTIME
# Line 106 | Line 376 | typedef struct timespec tm_time_t;
376   typedef struct timeval tm_time_t;
377   #endif
378  
379 + // Timing functions
380 + extern uint64 GetTicks_usec(void);
381 + extern void Delay_usec(uint32 usec);
382 +
383 + #if defined(HAVE_PTHREADS) || (defined(__linux__) && defined(__powerpc__))
384 + // Setup pthread attributes
385 + extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
386 + #endif
387 +
388   // Various definitions
389   typedef struct rgb_color {
390          uint8           red;
# Line 114 | Line 393 | typedef struct rgb_color {
393          uint8           alpha;
394   } rgb_color;
395  
396 + // X11 display fast locks
397 + #ifdef HAVE_SPINLOCKS
398 + #define X11_LOCK_TYPE spinlock_t
399 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
400 + #define XDisplayLock() spin_lock(&x_display_lock)
401 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
402 + #elif defined(HAVE_PTHREADS)
403 + #define X11_LOCK_TYPE pthread_mutex_t
404 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
405 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
406 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
407 + #else
408 + #define XDisplayLock()
409 + #define XDisplayUnlock()
410 + #endif
411 + #ifdef X11_LOCK_TYPE
412 + extern X11_LOCK_TYPE x_display_lock;
413 + #endif
414 +
415   // Macro for calling MacOS routines
416   #define CallMacOS(type, tvect) call_macos((uint32)tvect)
417   #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)
# Line 124 | Line 422 | typedef struct rgb_color {
422   #define CallMacOS6(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6) call_macos6((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4, (uint32)arg5, (uint32)arg6)
423   #define CallMacOS7(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6, arg7) call_macos7((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4, (uint32)arg5, (uint32)arg6, (uint32)arg7)
424  
425 < extern "C" uint32 call_macos(uint32 tvect);
426 < extern "C" uint32 call_macos1(uint32 tvect, uint32 arg1);
427 < extern "C" uint32 call_macos2(uint32 tvect, uint32 arg1, uint32 arg2);
428 < extern "C" uint32 call_macos3(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3);
429 < extern "C" uint32 call_macos4(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4);
430 < extern "C" uint32 call_macos5(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5);
431 < extern "C" uint32 call_macos6(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6);
432 < extern "C" uint32 call_macos7(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6, uint32 arg7);
425 > #ifdef __cplusplus
426 > extern "C" {
427 > #endif
428 > extern uint32 call_macos(uint32 tvect);
429 > extern uint32 call_macos1(uint32 tvect, uint32 arg1);
430 > extern uint32 call_macos2(uint32 tvect, uint32 arg1, uint32 arg2);
431 > extern uint32 call_macos3(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3);
432 > extern uint32 call_macos4(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4);
433 > extern uint32 call_macos5(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5);
434 > extern uint32 call_macos6(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6);
435 > extern uint32 call_macos7(uint32 tvect, uint32 arg1, uint32 arg2, uint32 arg3, uint32 arg4, uint32 arg5, uint32 arg6, uint32 arg7);
436 > #ifdef __cplusplus
437 > }
438 > #endif
439  
440   #endif

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines