ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/sysdeps.h
(Generate patch)

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.18 by gbeauche, 2003-12-28T17:56:03Z vs.
Revision 1.25 by gbeauche, 2004-02-25T22:02:59Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 44 | Line 44
44   #include <string.h>
45   #include <signal.h>
46  
47 + #ifdef HAVE_PTHREADS
48 + # include <pthread.h>
49 + #endif
50 +
51   #ifdef HAVE_FCNTL_H
52   # include <fcntl.h>
53   #endif
# Line 139 | Line 143 | typedef int64 intptr;
143   **/
144  
145   #if defined(__GNUC__)
146 < #if defined(__x86_64__)
146 > #if defined(__x86_64__) || defined(__i386__)
147   // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
148   #define opt_bswap_32 do_opt_bswap_32
149   static inline uint32 do_opt_bswap_32(uint32 x)
# Line 184 | Line 188 | static inline uint32 generic_bswap_32(ui
188                    ((x & 0x000000ff) << 24) );
189   }
190  
191 + #if defined(__i386__)
192 + #define opt_bswap_64 do_opt_bswap_64
193 + static inline uint64 do_opt_bswap_64(uint64 x)
194 + {
195 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
196 + }
197 + #endif
198 +
199   #ifdef  opt_bswap_64
200   #undef  bswap_64
201   #define bswap_64 opt_bswap_64
# Line 217 | Line 229 | static inline uint64 tswap64(uint64 x) {
229   // spin locks
230   #ifdef __GNUC__
231  
232 < #ifdef __powerpc__
232 > #if defined(__powerpc__) || defined(__ppc__)
233   #define HAVE_TEST_AND_SET 1
234 < static inline int testandset(int *p)
234 > static inline int testandset(volatile int *p)
235   {
236          int ret;
237 <        __asm__ __volatile__("0:    lwarx %0,0,%1 ;"
238 <                                                 "      xor. %0,%3,%0;"
239 <                                                 "      bne 1f;"
240 <                                                 "      stwcx. %2,0,%1;"
241 <                                                 "      bne- 0b;"
237 >        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
238 >                                                 "      xor.    %0,%3,%0\n"
239 >                                                 "      bne             1f\n"
240 >                                                 "      stwcx.  %2,0,%1\n"
241 >                                                 "      bne-    0b\n"
242                                                   "1:    "
243                                                   : "=&r" (ret)
244                                                   : "r" (p), "r" (1), "r" (0)
# Line 237 | Line 249 | static inline int testandset(int *p)
249  
250   #ifdef __i386__
251   #define HAVE_TEST_AND_SET 1
252 < static inline int testandset(int *p)
252 > static inline int testandset(volatile int *p)
253   {
254 <        char ret;
254 >        int ret;
255          long int readval;
256 <        
257 <        __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
258 <                                                 : "=q" (ret), "=m" (*p), "=a" (readval)
259 <                                                 : "r" (1), "m" (*p), "a" (0)
256 >        /* Note: the "xchg" instruction does not need a "lock" prefix */
257 >        __asm__ __volatile__("xchgl %0, %1"
258 >                                                 : "=r" (ret), "=m" (*p), "=a" (readval)
259 >                                                 : "0" (1), "m" (*p)
260                                                   : "memory");
261          return ret;
262   }
# Line 252 | Line 264 | static inline int testandset(int *p)
264  
265   #ifdef __s390__
266   #define HAVE_TEST_AND_SET 1
267 < static inline int testandset(int *p)
267 > static inline int testandset(volatile int *p)
268   {
269          int ret;
270  
# Line 267 | Line 279 | static inline int testandset(int *p)
279  
280   #ifdef __alpha__
281   #define HAVE_TEST_AND_SET 1
282 < static inline int testandset(int *p)
282 > static inline int testandset(volatile int *p)
283   {
284          int ret;
285          unsigned long one;
# Line 287 | Line 299 | static inline int testandset(int *p)
299  
300   #ifdef __sparc__
301   #define HAVE_TEST_AND_SET 1
302 < static inline int testandset(int *p)
302 > static inline int testandset(volatile int *p)
303   {
304          int ret;
305  
# Line 302 | Line 314 | static inline int testandset(int *p)
314  
315   #ifdef __arm__
316   #define HAVE_TEST_AND_SET 1
317 < static inline int testandset(int *p)
317 > static inline int testandset(volatile int *p)
318   {
319          register unsigned int ret;
320          __asm__ __volatile__("swp %0, %1, [%2]"
# Line 317 | Line 329 | static inline int testandset(int *p)
329  
330   #if HAVE_TEST_AND_SET
331   #define HAVE_SPINLOCKS 1
332 < typedef int spinlock_t;
332 > typedef volatile int spinlock_t;
333  
334   static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
335  
# Line 348 | Line 360 | typedef struct timeval tm_time_t;
360   extern uint64 GetTicks_usec(void);
361   extern void Delay_usec(uint32 usec);
362  
363 + #if defined(HAVE_PTHREADS) || (defined(__linux__) && defined(__powerpc__))
364   // Setup pthread attributes
365   extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
366 + #endif
367  
368   // Various definitions
369   typedef struct rgb_color {
# Line 359 | Line 373 | typedef struct rgb_color {
373          uint8           alpha;
374   } rgb_color;
375  
376 + // X11 display fast locks
377 + #ifdef HAVE_SPINLOCKS
378 + #define X11_LOCK_TYPE spinlock_t
379 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
380 + #define XDisplayLock() spin_lock(&x_display_lock)
381 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
382 + #elif defined(HAVE_PTHREADS)
383 + #define X11_LOCK_TYPE pthread_mutex_t
384 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
385 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
386 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
387 + #else
388 + #define XDisplayLock()
389 + #define XDisplayUnlock()
390 + #endif
391 + #ifdef X11_LOCK_TYPE
392 + extern X11_LOCK_TYPE x_display_lock;
393 + #endif
394 +
395   // Macro for calling MacOS routines
396   #define CallMacOS(type, tvect) call_macos((uint32)tvect)
397   #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines