root/cebix/SheepShaver/src/Unix/sysdeps.h

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.11 by gbeauche, 2003-10-12T05:44:14Z vs.
Revision 1.23 by gbeauche, 2004-01-26T13:52:31Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 78 | Line 78
78   # define ROM_IS_WRITE_PROTECTED 1
79   #endif
80   // Configure PowerPC emulator
81 < #define PPC_NO_LAZY_PC_UPDATE 1
82 < #define PPC_NO_DECODE_CACHE 1
81 > #define PPC_CHECK_INTERRUPTS (ASYNC_IRQ ? 0 : 1)
82 > #define PPC_DECODE_CACHE 1
83   #define PPC_FLIGHT_RECORDER 1
84 + #define PPC_PROFILE_COMPILE_TIME 0
85 + #define PPC_PROFILE_GENERIC_CALLS 0
86 + #define KPX_MAX_CPUS 1
87   #else
88   // Mac ROM is write protected
89   #define ROM_IS_WRITE_PROTECTED 1
# Line 131 | Line 134 | typedef int64 intptr;
134   #error "Unsupported size of pointer"
135   #endif
136  
137 < // Helper functions to byteswap data
137 > /**
138 > *              Helper functions to byteswap data
139 > **/
140 >
141 > #if defined(__GNUC__)
142 > #if defined(__x86_64__) || defined(__i386__)
143 > // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
144 > #define opt_bswap_32 do_opt_bswap_32
145 > static inline uint32 do_opt_bswap_32(uint32 x)
146 > {
147 >  uint32 v;
148 >  __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
149 >  return v;
150 > }
151 > #endif
152 > #endif
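
The new block above uses the x86 `bswap` instruction, which reverses the
byte order of a 32-bit register in a single instruction. A minimal sanity
check (hypothetical, assuming the definitions above are in scope; not part
of the file):

  #include <assert.h>
  int main(void)
  {
    /* bswap reverses all four bytes: 12 34 56 78 -> 78 56 34 12 */
    assert(do_opt_bswap_32(0x12345678) == 0x78563412);
    return 0;
  }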
153 >
154   #ifdef HAVE_BYTESWAP_H
155   #include <byteswap.h>
156   #endif
157  
158 + #ifdef  opt_bswap_16
159 + #undef  bswap_16
160 + #define bswap_16 opt_bswap_16
161 + #endif
162   #ifndef bswap_16
163   #define bswap_16 generic_bswap_16
164   #endif
# Line 145 | Line 168 | static inline uint16 generic_bswap_16(ui
168    return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
169   }
170  
171 + #ifdef  opt_bswap_32
172 + #undef  bswap_32
173 + #define bswap_32 opt_bswap_32
174 + #endif
175   #ifndef bswap_32
176   #define bswap_32 generic_bswap_32
177   #endif
# Line 157 | Line 184 | static inline uint32 generic_bswap_32(ui
184                    ((x & 0x000000ff) << 24) );
185   }
186  
187 + #if defined(__i386__)
188 + #define opt_bswap_64 do_opt_bswap_64
189 + static inline uint64 do_opt_bswap_64(uint64 x)
190 + {
191 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
192 + }
193 + #endif
194 +
195 + #ifdef  opt_bswap_64
196 + #undef  bswap_64
197 + #define bswap_64 opt_bswap_64
198 + #endif
199   #ifndef bswap_64
200   #define bswap_64 generic_bswap_64
201   #endif
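
Each width now resolves in the same order: a hand-written opt_bswap_*
defined above wins, then the bswap_* macro from <byteswap.h> if present,
with generic_bswap_* as the portable fallback. On i386 the 64-bit swap is
composed from two 32-bit swaps; a worked example (hypothetical check, not
in the file):

  /* bswap_64(0x0123456789ABCDEFull) == 0xEFCDAB8967452301ull:
     low word  0x89ABCDEF -> 0xEFCDAB89, becomes the new high word;
     high word 0x01234567 -> 0x67452301, becomes the new low word. */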
# Line 186 | Line 225 | static inline uint64 tswap64(uint64 x) {
225   // spin locks
226   #ifdef __GNUC__
227  
228 < #ifdef __powerpc__
228 > #if defined(__powerpc__) || defined(__ppc__)
229   #define HAVE_TEST_AND_SET 1
230 < static inline int testandset(int *p)
230 > static inline int testandset(volatile int *p)
231   {
232          int ret;
233 <        __asm__ __volatile__("0:    lwarx %0,0,%1 ;"
234 <                                                 "      xor. %0,%3,%0;"
235 <                                                 "      bne 1f;"
236 <                                                 "      stwcx. %2,0,%1;"
237 <                                                 "      bne- 0b;"
233 >        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
234 >                                                 "      xor.    %0,%3,%0\n"
235 >                                                 "      bne             1f\n"
236 >                                                 "      stwcx.  %2,0,%1\n"
237 >                                                 "      bne-    0b\n"
238                                                   "1:    "
239                                                   : "=&r" (ret)
240                                                   : "r" (p), "r" (1), "r" (0)
# Line 206 | Line 245 | static inline int testandset(int *p)
245  
246   #ifdef __i386__
247   #define HAVE_TEST_AND_SET 1
248 < static inline int testandset(int *p)
248 > static inline int testandset(volatile int *p)
249   {
250 <        char ret;
250 >        int ret;
251          long int readval;
252 <        
253 <        __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
254 <                                                 : "=q" (ret), "=m" (*p), "=a" (readval)
255 <                                                 : "r" (1), "m" (*p), "a" (0)
252 >        /* Note: the "xchg" instruction does not need a "lock" prefix */
253 >        __asm__ __volatile__("xchgl %0, %1"
254 >                                                 : "=r" (ret), "=m" (*p), "=a" (readval)
255 >                                                 : "0" (1), "m" (*p)
256                                                   : "memory");
257          return ret;
258   }
# Line 221 | Line 260 | static inline int testandset(int *p)
260  
261   #ifdef __s390__
262   #define HAVE_TEST_AND_SET 1
263 < static inline int testandset(int *p)
263 > static inline int testandset(volatile int *p)
264   {
265          int ret;
266  
# Line 236 | Line 275 | static inline int testandset(int *p)
275  
276   #ifdef __alpha__
277   #define HAVE_TEST_AND_SET 1
278 < static inline int testandset(int *p)
278 > static inline int testandset(volatile int *p)
279   {
280          int ret;
281          unsigned long one;
# Line 256 | Line 295 | static inline int testandset(int *p)
295  
296   #ifdef __sparc__
297   #define HAVE_TEST_AND_SET 1
298 < static inline int testandset(int *p)
298 > static inline int testandset(volatile int *p)
299   {
300          int ret;
301  
# Line 271 | Line 310 | static inline int testandset(int *p)
310  
311   #ifdef __arm__
312   #define HAVE_TEST_AND_SET 1
313 < static inline int testandset(int *p)
313 > static inline int testandset(volatile int *p)
314   {
315          register unsigned int ret;
316          __asm__ __volatile__("swp %0, %1, [%2]"
# Line 286 | Line 325 | static inline int testandset(int *p)
325  
326   #if HAVE_TEST_AND_SET
327   #define HAVE_SPINLOCKS 1
328 < typedef int spinlock_t;
328 > typedef volatile int spinlock_t;
329  
330   static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
331  
# Line 313 | Line 352 | typedef struct timespec tm_time_t;
352   typedef struct timeval tm_time_t;
353   #endif
354  
355 + // Timing functions
356 + extern uint64 GetTicks_usec(void);
357 + extern void Delay_usec(uint32 usec);
358 +
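
GetTicks_usec() returns a microsecond timestamp and Delay_usec() sleeps
for the given interval; the definitions live elsewhere in the tree. A
plausible POSIX implementation (an assumption, not the file's code):

  #include <sys/time.h>
  #include <unistd.h>

  uint64 GetTicks_usec(void)
  {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (uint64)tv.tv_sec * 1000000 + tv.tv_usec;
  }

  void Delay_usec(uint32 usec)
  {
    usleep(usec);                 /* adequate for sub-second delays */
  }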
359   // Setup pthread attributes
360   extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
361  
# Line 324 | Line 367 | typedef struct rgb_color {
367          uint8           alpha;
368   } rgb_color;
369  
370 + // X11 display fast locks
371 + #ifdef HAVE_SPINLOCKS
372 + #define X11_LOCK_TYPE spinlock_t
373 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
374 + #define XDisplayLock() spin_lock(&x_display_lock)
375 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
376 + #elif defined(HAVE_PTHREADS)
377 + #define X11_LOCK_TYPE pthread_mutex_t
378 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
379 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
380 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
381 + #else
382 + #define XDisplayLock()
383 + #define XDisplayUnlock()
384 + #endif
385 + #ifdef X11_LOCK_TYPE
386 + extern X11_LOCK_TYPE x_display_lock;
387 + #endif
388 +
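
Usage sketch for the X11 fast locks: every thread that talks to the
shared display wraps its Xlib calls in the lock/unlock pair. The handles
below are hypothetical stand-ins for the ones defined in the video
driver:

  XDisplayLock();
  XFlush(x_display);              /* any Xlib traffic goes inside the lock */
  XDisplayUnlock();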
389   // Macro for calling MacOS routines
390   #define CallMacOS(type, tvect) call_macos((uint32)tvect)
391   #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)