ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/sysdeps.h
(Generate patch)

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.5 by gbeauche, 2003-09-07T14:19:25Z vs.
Revision 1.48 by gbeauche, 2005-07-03T22:02:01Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 41 | Line 41
41   #include <assert.h>
42   #include <stdio.h>
43   #include <stdlib.h>
44 + #include <stddef.h>
45   #include <string.h>
46   #include <signal.h>
47  
48 + #ifdef HAVE_PTHREADS
49 + # include <pthread.h>
50 + #endif
51 +
52   #ifdef HAVE_FCNTL_H
53   # include <fcntl.h>
54   #endif
# Line 59 | Line 64
64   # endif
65   #endif
66  
67 + // Fix offsetof() on FreeBSD and GCC >= 3.4
68 + #if defined(__FreeBSD__) && defined(__cplusplus)
69 + #undef offsetof
70 + /* The cast to "char &" below avoids problems with user-defined
71 +   "operator &", which can appear in a POD type.  */
72 + #define offsetof(TYPE, MEMBER)                          \
73 +  (__offsetof__ (reinterpret_cast <size_t>              \
74 +                 (&reinterpret_cast <char &>            \
75 +                  (static_cast<TYPE *> (0)->MEMBER))))
76 + #endif
77 +
78   // Define for external components
79   #define SHEEPSHAVER 1
80  
81 < // Mac and host address space are the same
81 > // Always use Real Addressing mode on native architectures
82 > // Otherwise, use Direct Addressing mode if NATMEM_OFFSET is set
83 > #if !defined(EMULATED_PPC)
84 > #define REAL_ADDRESSING 1
85 > #include "ppc_asm.tmpl"
86 > #elif defined(NATMEM_OFFSET)
87 > #define DIRECT_ADDRESSING 1
88 > #else
89   #define REAL_ADDRESSING 1
90 + #endif
91 +
92 + // Always use the complete non-stubs Ethernet driver
93 + #if DIRECT_ADDRESSING
94 + #define USE_ETHER_FULL_DRIVER 1
95 + #endif
96  
97   #define POWERPC_ROM 1
98  
# Line 75 | Line 104
104   #else
105   # define ROM_IS_WRITE_PROTECTED 1
106   #endif
107 + // Configure PowerPC emulator
108 + #define PPC_REENTRANT_JIT 1
109 + #define PPC_CHECK_INTERRUPTS 1
110 + #define PPC_DECODE_CACHE 1
111 + #define PPC_FLIGHT_RECORDER 1
112 + #define PPC_PROFILE_COMPILE_TIME 0
113 + #define PPC_PROFILE_GENERIC_CALLS 0
114 + #define KPX_MAX_CPUS 1
115 + #if ENABLE_DYNGEN
116 + // Don't bother with predecode cache when using JIT
117 + #define PPC_ENABLE_JIT 1
118 + #undef  PPC_DECODE_CACHE
119 + #endif
120 + #if defined(__i386__)
121 + #define DYNGEN_ASM_OPTS 1
122 + #endif
123   #else
124   // Mac ROM is write protected
125   #define ROM_IS_WRITE_PROTECTED 1
# Line 125 | Line 170 | typedef int64 intptr;
170   #error "Unsupported size of pointer"
171   #endif
172  
173 < // Helper functions to byteswap data
173 > /**
174 > *              Helper functions to byteswap data
175 > **/
176 >
177 > #if defined(__GNUC__)
178 > #if defined(__x86_64__) || defined(__i386__)
179 > // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
180 > #define opt_bswap_32 do_opt_bswap_32
181 > static inline uint32 do_opt_bswap_32(uint32 x)
182 > {
183 >  uint32 v;
184 >  __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
185 >  return v;
186 > }
187 > #endif
188 > #endif
189 >
190   #ifdef HAVE_BYTESWAP_H
191   #include <byteswap.h>
192   #endif
193  
194 + #ifdef  opt_bswap_16
195 + #undef  bswap_16
196 + #define bswap_16 opt_bswap_16
197 + #endif
198   #ifndef bswap_16
199   #define bswap_16 generic_bswap_16
200   #endif
# Line 139 | Line 204 | static inline uint16 generic_bswap_16(ui
204    return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
205   }
206  
207 + #ifdef  opt_bswap_32
208 + #undef  bswap_32
209 + #define bswap_32 opt_bswap_32
210 + #endif
211   #ifndef bswap_32
212   #define bswap_32 generic_bswap_32
213   #endif
# Line 151 | Line 220 | static inline uint32 generic_bswap_32(ui
220                    ((x & 0x000000ff) << 24) );
221   }
222  
223 + #if defined(__i386__)
224 + #define opt_bswap_64 do_opt_bswap_64
225 + static inline uint64 do_opt_bswap_64(uint64 x)
226 + {
227 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
228 + }
229 + #endif
230 +
231 + #ifdef  opt_bswap_64
232 + #undef  bswap_64
233 + #define bswap_64 opt_bswap_64
234 + #endif
235   #ifndef bswap_64
236   #define bswap_64 generic_bswap_64
237   #endif
# Line 177 | Line 258 | static inline uint32 tswap32(uint32 x) {
258   static inline uint64 tswap64(uint64 x) { return bswap_64(x); }
259   #endif
260  
261 + // spin locks
262 + #ifdef __GNUC__
263 +
264 + #if defined(__powerpc__) || defined(__ppc__)
265 + #define HAVE_TEST_AND_SET 1
266 + static inline int testandset(volatile int *p)
267 + {
268 +        int ret;
269 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
270 +                                                 "      xor.    %0,%3,%0\n"
271 +                                                 "      bne             1f\n"
272 +                                                 "      stwcx.  %2,0,%1\n"
273 +                                                 "      bne-    0b\n"
274 +                                                 "1:    "
275 +                                                 : "=&r" (ret)
276 +                                                 : "r" (p), "r" (1), "r" (0)
277 +                                                 : "cr0", "memory");
278 +        return ret;
279 + }
280 + #endif
281 +
282 + #if defined(__i386__) || defined(__x86_64__)
283 + #define HAVE_TEST_AND_SET 1
284 + static inline int testandset(volatile int *p)
285 + {
286 +        long int ret;
287 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
288 +        __asm__ __volatile__("xchgl %k0, %1"
289 +                                                 : "=r" (ret), "=m" (*p)
290 +                                                 : "0" (1), "m" (*p)
291 +                                                 : "memory");
292 +        return ret;
293 + }
294 + #endif
295 +
296 + #ifdef __s390__
297 + #define HAVE_TEST_AND_SET 1
298 + static inline int testandset(volatile int *p)
299 + {
300 +        int ret;
301 +
302 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
303 +                                                 "   jl    0b"
304 +                                                 : "=&d" (ret)
305 +                                                 : "r" (1), "a" (p), "0" (*p)
306 +                                                 : "cc", "memory" );
307 +        return ret;
308 + }
309 + #endif
310 +
311 + #ifdef __alpha__
312 + #define HAVE_TEST_AND_SET 1
313 + static inline int testandset(volatile int *p)
314 + {
315 +        int ret;
316 +        unsigned long one;
317 +
318 +        __asm__ __volatile__("0:        mov 1,%2\n"
319 +                                                 "      ldl_l %0,%1\n"
320 +                                                 "      stl_c %2,%1\n"
321 +                                                 "      beq %2,1f\n"
322 +                                                 ".subsection 2\n"
323 +                                                 "1:    br 0b\n"
324 +                                                 ".previous"
325 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
326 +                                                 : "m" (*p));
327 +        return ret;
328 + }
329 + #endif
330 +
331 + #ifdef __sparc__
332 + #define HAVE_TEST_AND_SET 1
333 + static inline int testandset(volatile int *p)
334 + {
335 +        int ret;
336 +
337 +        __asm__ __volatile__("ldstub    [%1], %0"
338 +                                                 : "=r" (ret)
339 +                                                 : "r" (p)
340 +                                                 : "memory");
341 +
342 +        return (ret ? 1 : 0);
343 + }
344 + #endif
345 +
346 + #ifdef __arm__
347 + #define HAVE_TEST_AND_SET 1
348 + static inline int testandset(volatile int *p)
349 + {
350 +        register unsigned int ret;
351 +        __asm__ __volatile__("swp %0, %1, [%2]"
352 +                                                 : "=r"(ret)
353 +                                                 : "0"(1), "r"(p));
354 +        
355 +        return ret;
356 + }
357 + #endif
358 +
359 + #endif /* __GNUC__ */
360 +
361 + typedef volatile int spinlock_t;
362 +
363 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
364 +
365 + #if defined(HAVE_TEST_AND_SET) && defined(HAVE_PTHREADS)
366 + // There is nothing to lock if we are not in a multithreaded environment
367 + #define HAVE_SPINLOCKS 1
368 + static inline void spin_lock(spinlock_t *lock)
369 + {
370 +        while (testandset(lock));
371 + }
372 +
373 + static inline void spin_unlock(spinlock_t *lock)
374 + {
375 +        *lock = 0;
376 + }
377 +
378 + static inline int spin_trylock(spinlock_t *lock)
379 + {
380 +        return !testandset(lock);
381 + }
382 + #else
383 + static inline void spin_lock(spinlock_t *lock)
384 + {
385 + }
386 +
387 + static inline void spin_unlock(spinlock_t *lock)
388 + {
389 + }
390 +
391 + static inline int spin_trylock(spinlock_t *lock)
392 + {
393 +        return 1;
394 + }
395 + #endif
396 +
397   // Time data type for Time Manager emulation
398   #ifdef HAVE_CLOCK_GETTIME
399   typedef struct timespec tm_time_t;
# Line 184 | Line 401 | typedef struct timespec tm_time_t;
401   typedef struct timeval tm_time_t;
402   #endif
403  
404 + /* Define codes for all the float formats that we know of.
405 + * Though we only handle IEEE format.  */
406 + #define UNKNOWN_FLOAT_FORMAT 0
407 + #define IEEE_FLOAT_FORMAT 1
408 + #define VAX_FLOAT_FORMAT 2
409 + #define IBM_FLOAT_FORMAT 3
410 + #define C4X_FLOAT_FORMAT 4
411 +
412 + // High-precision timing
413 + #if defined(HAVE_PTHREADS) && defined(HAVE_CLOCK_NANOSLEEP)
414 + #define PRECISE_TIMING 1
415 + #define PRECISE_TIMING_POSIX 1
416 + #endif
417 +
418 + // Timing functions
419 + extern uint64 GetTicks_usec(void);
420 + extern void Delay_usec(uint32 usec);
421 +
422 + #ifdef HAVE_PTHREADS
423   // Setup pthread attributes
424   extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
425 + #endif
426  
427   // Various definitions
428   typedef struct rgb_color {
# Line 195 | Line 432 | typedef struct rgb_color {
432          uint8           alpha;
433   } rgb_color;
434  
435 + // X11 display fast locks
436 + #if defined(HAVE_PTHREADS)
437 + #define X11_LOCK_TYPE pthread_mutex_t
438 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
439 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
440 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
441 + #elif defined(HAVE_SPINLOCKS)
442 + #define X11_LOCK_TYPE spinlock_t
443 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
444 + #define XDisplayLock() spin_lock(&x_display_lock)
445 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
446 + #else
447 + #define XDisplayLock()
448 + #define XDisplayUnlock()
449 + #endif
450 + #ifdef X11_LOCK_TYPE
451 + extern X11_LOCK_TYPE x_display_lock;
452 + #endif
453 +
454   // Macro for calling MacOS routines
455 < #define CallMacOS(type, tvect) call_macos((uint32)tvect)
456 < #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)
457 < #define CallMacOS2(type, tvect, arg1, arg2) call_macos2((uint32)tvect, (uint32)arg1, (uint32)arg2)
458 < #define CallMacOS3(type, tvect, arg1, arg2, arg3) call_macos3((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3)
459 < #define CallMacOS4(type, tvect, arg1, arg2, arg3, arg4) call_macos4((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4)
460 < #define CallMacOS5(type, tvect, arg1, arg2, arg3, arg4, arg5) call_macos5((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4, (uint32)arg5)
461 < #define CallMacOS6(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6) call_macos6((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4, (uint32)arg5, (uint32)arg6)
462 < #define CallMacOS7(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6, arg7) call_macos7((uint32)tvect, (uint32)arg1, (uint32)arg2, (uint32)arg3, (uint32)arg4, (uint32)arg5, (uint32)arg6, (uint32)arg7)
455 > #define CallMacOS(type, tvect) call_macos((uintptr)tvect)
456 > #define CallMacOS1(type, tvect, arg1) call_macos1((uintptr)tvect, (uintptr)arg1)
457 > #define CallMacOS2(type, tvect, arg1, arg2) call_macos2((uintptr)tvect, (uintptr)arg1, (uintptr)arg2)
458 > #define CallMacOS3(type, tvect, arg1, arg2, arg3) call_macos3((uintptr)tvect, (uintptr)arg1, (uintptr)arg2, (uintptr)arg3)
459 > #define CallMacOS4(type, tvect, arg1, arg2, arg3, arg4) call_macos4((uintptr)tvect, (uintptr)arg1, (uintptr)arg2, (uintptr)arg3, (uintptr)arg4)
460 > #define CallMacOS5(type, tvect, arg1, arg2, arg3, arg4, arg5) call_macos5((uintptr)tvect, (uintptr)arg1, (uintptr)arg2, (uintptr)arg3, (uintptr)arg4, (uintptr)arg5)
461 > #define CallMacOS6(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6) call_macos6((uintptr)tvect, (uintptr)arg1, (uintptr)arg2, (uintptr)arg3, (uintptr)arg4, (uintptr)arg5, (uintptr)arg6)
462 > #define CallMacOS7(type, tvect, arg1, arg2, arg3, arg4, arg5, arg6, arg7) call_macos7((uintptr)tvect, (uintptr)arg1, (uintptr)arg2, (uintptr)arg3, (uintptr)arg4, (uintptr)arg5, (uintptr)arg6, (uintptr)arg7)
463  
464   #ifdef __cplusplus
465   extern "C" {

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines