  1     /*
  2      * sysdeps.h - System dependent definitions for Linux
  3      *
  4 <    * SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
  4 >    * SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
  5      *
  6      * This program is free software; you can redistribute it and/or modify
  7      * it under the terms of the GNU General Public License as published by
 44     #include <string.h>
 45     #include <signal.h>
 46
 47 +   #ifdef HAVE_PTHREADS
 48 +   # include <pthread.h>
 49 +   #endif
 50 +
 51     #ifdef HAVE_FCNTL_H
 52     # include <fcntl.h>
 53     #endif
 81     #else
 82     # define ROM_IS_WRITE_PROTECTED 1
 83     #endif
 84 +   // Configure PowerPC emulator
 85 +   #define PPC_CHECK_INTERRUPTS (ASYNC_IRQ ? 0 : 1)
 86 +   #define PPC_DECODE_CACHE 1
 87 +   #define PPC_FLIGHT_RECORDER 1
 88 +   #define PPC_PROFILE_COMPILE_TIME 0
 89 +   #define PPC_PROFILE_GENERIC_CALLS 0
 90 +   #define KPX_MAX_CPUS 1
 91 +   // direct block chaining is only tested on PPC right now
 92 +   #if defined(__powerpc__)
 93 +   #define DYNGEN_DIRECT_BLOCK_CHAINING 1
 94 +   #endif
 95     #else
 96     // Mac ROM is write protected
 97     #define ROM_IS_WRITE_PROTECTED 1
142     #error "Unsupported size of pointer"
143     #endif
144
145 <   // Helper functions to byteswap data
145 >   /**
146 >    * Helper functions to byteswap data
147 >    **/
148 >
149 >   #if defined(__GNUC__)
150 >   #if defined(__x86_64__) || defined(__i386__)
151 >   // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
152 >   #define opt_bswap_32 do_opt_bswap_32
153 >   static inline uint32 do_opt_bswap_32(uint32 x)
154 >   {
155 >       uint32 v;
156 >       __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
157 >       return v;
158 >   }
159 >   #endif
160 >   #endif
161 >
162     #ifdef HAVE_BYTESWAP_H
163     #include <byteswap.h>
164     #endif
165
166 +   #ifdef opt_bswap_16
167 +   #undef bswap_16
168 +   #define bswap_16 opt_bswap_16
169 +   #endif
170     #ifndef bswap_16
171     #define bswap_16 generic_bswap_16
172     #endif
176         return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
177     }
178
179 +   #ifdef opt_bswap_32
180 +   #undef bswap_32
181 +   #define bswap_32 opt_bswap_32
182 +   #endif
183     #ifndef bswap_32
184     #define bswap_32 generic_bswap_32
185     #endif
192             ((x & 0x000000ff) << 24) );
193     }
194
195 +   #if defined(__i386__)
196 +   #define opt_bswap_64 do_opt_bswap_64
197 +   static inline uint64 do_opt_bswap_64(uint64 x)
198 +   {
199 +       return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
200 +   }
201 +   #endif
202 +
203 +   #ifdef opt_bswap_64
204 +   #undef bswap_64
205 +   #define bswap_64 opt_bswap_64
206 +   #endif
207     #ifndef bswap_64
208     #define bswap_64 generic_bswap_64
209     #endif
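With the optimized and generic variants wired up above, bswap_16/bswap_32/bswap_64 reverse byte order regardless of which path the preprocessor selected. A minimal sanity-check sketch, not taken from the patch and assuming the emulator's uint16/uint32/uint64 typedefs are in scope:

#include <assert.h>

/* Hypothetical self-test; any correct byteswap implementation satisfies it. */
static inline void check_bswap_sketch(void)
{
    assert(bswap_16((uint16)0x1234) == 0x3412);
    assert(bswap_32((uint32)0x12345678) == 0x78563412);
    assert(bswap_64((uint64)0x0102030405060708ULL) == 0x0807060504030201ULL);
}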
233     // spin locks
234     #ifdef __GNUC__
235
236 <   #ifdef __powerpc__
236 >   #if defined(__powerpc__) || defined(__ppc__)
237     #define HAVE_TEST_AND_SET 1
238 <   static inline int testandset(int *p)
238 >   static inline int testandset(volatile int *p)
239     {
240         int ret;
241 <       __asm__ __volatile__("0: lwarx %0,0,%1 ;"
242 <           " xor. %0,%3,%0;"
243 <           " bne 1f;"
244 <           " stwcx. %2,0,%1;"
245 <           " bne- 0b;"
241 >       __asm__ __volatile__("0: lwarx %0,0,%1\n"
242 >           " xor. %0,%3,%0\n"
243 >           " bne 1f\n"
244 >           " stwcx. %2,0,%1\n"
245 >           " bne- 0b\n"
246             "1: "
247             : "=&r" (ret)
248             : "r" (p), "r" (1), "r" (0)
253
254     #ifdef __i386__
255     #define HAVE_TEST_AND_SET 1
256 <   static inline int testandset(int *p)
256 >   static inline int testandset(volatile int *p)
257     {
258 <       char ret;
258 >       int ret;
259         long int readval;
260 <
261 <       __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
262 <           : "=q" (ret), "=m" (*p), "=a" (readval)
263 <           : "r" (1), "m" (*p), "a" (0)
260 >       /* Note: the "xchg" instruction does not need a "lock" prefix */
261 >       __asm__ __volatile__("xchgl %0, %1"
262 >           : "=r" (ret), "=m" (*p), "=a" (readval)
263 >           : "0" (1), "m" (*p)
264             : "memory");
265         return ret;
266     }
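The new i386 version works because x86 `xchg` with a memory operand is implicitly atomic, which is exactly what the patch's own comment notes. As an illustration only (not code from the patch), the same effect expressed with a GCC atomic builtin:

/* Sketch only: atomically store 1 at *p and return the previous value,
 * i.e. the same contract as the xchgl-based testandset() above. */
static inline int testandset_builtin_sketch(volatile int *p)
{
    return __sync_lock_test_and_set(p, 1);
}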
268
269     #ifdef __s390__
270     #define HAVE_TEST_AND_SET 1
271 <   static inline int testandset(int *p)
271 >   static inline int testandset(volatile int *p)
272     {
273         int ret;
274
283
284     #ifdef __alpha__
285     #define HAVE_TEST_AND_SET 1
286 <   static inline int testandset(int *p)
286 >   static inline int testandset(volatile int *p)
287     {
288         int ret;
289         unsigned long one;
303
304     #ifdef __sparc__
305     #define HAVE_TEST_AND_SET 1
306 <   static inline int testandset(int *p)
306 >   static inline int testandset(volatile int *p)
307     {
308         int ret;
309
318
319     #ifdef __arm__
320     #define HAVE_TEST_AND_SET 1
321 <   static inline int testandset(int *p)
321 >   static inline int testandset(volatile int *p)
322     {
323         register unsigned int ret;
324         __asm__ __volatile__("swp %0, %1, [%2]"
333
334     #if HAVE_TEST_AND_SET
335     #define HAVE_SPINLOCKS 1
336 <   typedef int spinlock_t;
336 >   typedef volatile int spinlock_t;
337
338 <   const spinlock_t SPIN_LOCK_UNLOCKED = 0;
338 >   static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
339
340     static inline void spin_lock(spinlock_t *lock)
341     {
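The bodies of spin_lock() and spin_unlock() fall outside this hunk's context. The usual shape of such functions on top of testandset(), given here as an assumption rather than a quotation from the file, would be:

/* Assumed shape only -- the real bodies are not shown in this diff. */
static inline void spin_lock_sketch(spinlock_t *lock)
{
    while (testandset(lock))
        ;   /* busy-wait until the lock word drops back to 0 */
}

static inline void spin_unlock_sketch(spinlock_t *lock)
{
    *lock = 0;
}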
360     typedef struct timeval tm_time_t;
361     #endif
362
363 +   // Timing functions
364 +   extern uint64 GetTicks_usec(void);
365 +   extern void Delay_usec(uint32 usec);
366 +
367 +   #if defined(HAVE_PTHREADS) || (defined(__linux__) && defined(__powerpc__))
368     // Setup pthread attributes
369     extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
370 +   #endif
371
372     // Various definitions
373     typedef struct rgb_color {
377         uint8 alpha;
378     } rgb_color;
379
380 +   // X11 display fast locks
381 +   #ifdef HAVE_SPINLOCKS
382 +   #define X11_LOCK_TYPE spinlock_t
383 +   #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
384 +   #define XDisplayLock() spin_lock(&x_display_lock)
385 +   #define XDisplayUnlock() spin_unlock(&x_display_lock)
386 +   #elif defined(HAVE_PTHREADS)
387 +   #define X11_LOCK_TYPE pthread_mutex_t
388 +   #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
389 +   #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
390 +   #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
391 +   #else
392 +   #define XDisplayLock()
393 +   #define XDisplayUnlock()
394 +   #endif
395 +   #ifdef X11_LOCK_TYPE
396 +   extern X11_LOCK_TYPE x_display_lock;
397 +   #endif
398 +
399     // Macro for calling MacOS routines
400     #define CallMacOS(type, tvect) call_macos((uint32)tvect)
401     #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)
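The XDisplayLock()/XDisplayUnlock() macros added above appear intended to bracket Xlib calls issued from more than one thread, taking either the spinlock or the pthread mutex path depending on configuration. A hypothetical call site, with all parameters standing in for the emulator's real globals (which this patch does not show):

#include <X11/Xlib.h>

/* Hypothetical helper; dpy, win and gc are placeholders, not SheepShaver names. */
static void draw_pixel_locked_sketch(Display *dpy, Window win, GC gc, int x, int y)
{
    XDisplayLock();                 /* spinlock, mutex or no-op, per the macros above */
    XDrawPoint(dpy, win, gc, x, y);
    XDisplayUnlock();
}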