ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/sysdeps.h
(Generate patch)

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.4 by gbeauche, 2003-05-22T22:12:05Z vs.
Revision 1.31 by gbeauche, 2004-05-20T17:46:49Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 44 | Line 44
44   #include <string.h>
45   #include <signal.h>
46  
47 + #ifdef HAVE_PTHREADS
48 + # include <pthread.h>
49 + #endif
50 +
51   #ifdef HAVE_FCNTL_H
52   # include <fcntl.h>
53   #endif
# Line 59 | Line 63
63   # endif
64   #endif
65  
66 + // Define for external components
67 + #define SHEEPSHAVER 1
68 +
69   // Mac and host address space are the same
70   #define REAL_ADDRESSING 1
71  
72 < // Are we using a PPC emulator or the real thing?
73 < #ifdef __powerpc__
74 < #define EMULATED_PPC 0
72 > #define POWERPC_ROM 1
73 >
74 > #if EMULATED_PPC
75 > // Mac ROM is write protected when banked memory is used
76 > #if REAL_ADDRESSING || DIRECT_ADDRESSING
77 > # define ROM_IS_WRITE_PROTECTED 0
78 > # define USE_SCRATCHMEM_SUBTERFUGE 1
79   #else
80 < #define EMULATED_PPC 1
80 > # define ROM_IS_WRITE_PROTECTED 1
81 > #endif
82 > // Configure PowerPC emulator
83 > #define PPC_REENTRANT_JIT 1
84 > #define PPC_CHECK_INTERRUPTS 1
85 > #define PPC_DECODE_CACHE 1
86 > #define PPC_FLIGHT_RECORDER 1
87 > #define PPC_PROFILE_COMPILE_TIME 0
88 > #define PPC_PROFILE_GENERIC_CALLS 0
89 > #define KPX_MAX_CPUS 1
90 > #if defined(__i386__)
91 > #define DYNGEN_ASM_OPTS 1
92 > #endif
93 > #else
94 > // Mac ROM is write protected
95 > #define ROM_IS_WRITE_PROTECTED 1
96 > #define USE_SCRATCHMEM_SUBTERFUGE 0
97   #endif
71
72 #define POWERPC_ROM 1
98  
99   // Data types
100   typedef unsigned char uint8;
# Line 115 | Line 140 | typedef int64 intptr;
140   #error "Unsupported size of pointer"
141   #endif
142  
143 + /**
144 + *              Helper functions to byteswap data
145 + **/
146 +
147 + #if defined(__GNUC__)
148 + #if defined(__x86_64__) || defined(__i386__)
149 + // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
150 + #define opt_bswap_32 do_opt_bswap_32
151 + static inline uint32 do_opt_bswap_32(uint32 x)
152 + {
153 +  uint32 v;
154 +  __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
155 +  return v;
156 + }
157 + #endif
158 + #endif
159 +
160 + #ifdef HAVE_BYTESWAP_H
161 + #include <byteswap.h>
162 + #endif
163 +
164 + #ifdef  opt_bswap_16
165 + #undef  bswap_16
166 + #define bswap_16 opt_bswap_16
167 + #endif
168 + #ifndef bswap_16
169 + #define bswap_16 generic_bswap_16
170 + #endif
171 +
172 + static inline uint16 generic_bswap_16(uint16 x)
173 + {
174 +  return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
175 + }
176 +
177 + #ifdef  opt_bswap_32
178 + #undef  bswap_32
179 + #define bswap_32 opt_bswap_32
180 + #endif
181 + #ifndef bswap_32
182 + #define bswap_32 generic_bswap_32
183 + #endif
184 +
185 + static inline uint32 generic_bswap_32(uint32 x)
186 + {
187 +  return (((x & 0xff000000) >> 24) |
188 +                  ((x & 0x00ff0000) >>  8) |
189 +                  ((x & 0x0000ff00) <<  8) |
190 +                  ((x & 0x000000ff) << 24) );
191 + }
192 +
193 + #if defined(__i386__)
194 + #define opt_bswap_64 do_opt_bswap_64
195 + static inline uint64 do_opt_bswap_64(uint64 x)
196 + {
197 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
198 + }
199 + #endif
200 +
201 + #ifdef  opt_bswap_64
202 + #undef  bswap_64
203 + #define bswap_64 opt_bswap_64
204 + #endif
205 + #ifndef bswap_64
206 + #define bswap_64 generic_bswap_64
207 + #endif
208 +
209 + static inline uint64 generic_bswap_64(uint64 x)
210 + {
211 +  return (((x & UVAL64(0xff00000000000000)) >> 56) |
212 +                  ((x & UVAL64(0x00ff000000000000)) >> 40) |
213 +                  ((x & UVAL64(0x0000ff0000000000)) >> 24) |
214 +                  ((x & UVAL64(0x000000ff00000000)) >>  8) |
215 +                  ((x & UVAL64(0x00000000ff000000)) <<  8) |
216 +                  ((x & UVAL64(0x0000000000ff0000)) << 24) |
217 +                  ((x & UVAL64(0x000000000000ff00)) << 40) |
218 +                  ((x & UVAL64(0x00000000000000ff)) << 56) );
219 + }
220 +
221 + #ifdef WORDS_BIGENDIAN
222 + static inline uint16 tswap16(uint16 x) { return x; }
223 + static inline uint32 tswap32(uint32 x) { return x; }
224 + static inline uint64 tswap64(uint64 x) { return x; }
225 + #else
226 + static inline uint16 tswap16(uint16 x) { return bswap_16(x); }
227 + static inline uint32 tswap32(uint32 x) { return bswap_32(x); }
228 + static inline uint64 tswap64(uint64 x) { return bswap_64(x); }
229 + #endif
230 +
231 + // spin locks
232 + #ifdef __GNUC__
233 +
234 + #if defined(__powerpc__) || defined(__ppc__)
235 + #define HAVE_TEST_AND_SET 1
236 + static inline int testandset(volatile int *p)
237 + {
238 +        int ret;
239 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
240 +                                                 "      xor.    %0,%3,%0\n"
241 +                                                 "      bne             1f\n"
242 +                                                 "      stwcx.  %2,0,%1\n"
243 +                                                 "      bne-    0b\n"
244 +                                                 "1:    "
245 +                                                 : "=&r" (ret)
246 +                                                 : "r" (p), "r" (1), "r" (0)
247 +                                                 : "cr0", "memory");
248 +        return ret;
249 + }
250 + #endif
251 +
252 + #if defined(__i386__) || defined(__x86_64__)
253 + #define HAVE_TEST_AND_SET 1
254 + static inline int testandset(volatile int *p)
255 + {
256 +        long int ret;
257 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
258 +        __asm__ __volatile__("xchgl %k0, %1"
259 +                                                 : "=r" (ret), "=m" (*p)
260 +                                                 : "0" (1), "m" (*p)
261 +                                                 : "memory");
262 +        return ret;
263 + }
264 + #endif
265 +
266 + #ifdef __s390__
267 + #define HAVE_TEST_AND_SET 1
268 + static inline int testandset(volatile int *p)
269 + {
270 +        int ret;
271 +
272 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
273 +                                                 "   jl    0b"
274 +                                                 : "=&d" (ret)
275 +                                                 : "r" (1), "a" (p), "0" (*p)
276 +                                                 : "cc", "memory" );
277 +        return ret;
278 + }
279 + #endif
280 +
281 + #ifdef __alpha__
282 + #define HAVE_TEST_AND_SET 1
283 + static inline int testandset(volatile int *p)
284 + {
285 +        int ret;
286 +        unsigned long one;
287 +
288 +        __asm__ __volatile__("0:        mov 1,%2\n"
289 +                                                 "      ldl_l %0,%1\n"
290 +                                                 "      stl_c %2,%1\n"
291 +                                                 "      beq %2,1f\n"
292 +                                                 ".subsection 2\n"
293 +                                                 "1:    br 0b\n"
294 +                                                 ".previous"
295 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
296 +                                                 : "m" (*p));
297 +        return ret;
298 + }
299 + #endif
300 +
301 + #ifdef __sparc__
302 + #define HAVE_TEST_AND_SET 1
303 + static inline int testandset(volatile int *p)
304 + {
305 +        int ret;
306 +
307 +        __asm__ __volatile__("ldstub    [%1], %0"
308 +                                                 : "=r" (ret)
309 +                                                 : "r" (p)
310 +                                                 : "memory");
311 +
312 +        return (ret ? 1 : 0);
313 + }
314 + #endif
315 +
316 + #ifdef __arm__
317 + #define HAVE_TEST_AND_SET 1
318 + static inline int testandset(volatile int *p)
319 + {
320 +        register unsigned int ret;
321 +        __asm__ __volatile__("swp %0, %1, [%2]"
322 +                                                 : "=r"(ret)
323 +                                                 : "0"(1), "r"(p));
324 +        
325 +        return ret;
326 + }
327 + #endif
328 +
329 + #endif /* __GNUC__ */
330 +
331 + #if HAVE_TEST_AND_SET
332 + #define HAVE_SPINLOCKS 1
333 + typedef volatile int spinlock_t;
334 +
335 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
336 +
337 + static inline void spin_lock(spinlock_t *lock)
338 + {
339 +        while (testandset(lock));
340 + }
341 +
342 + static inline void spin_unlock(spinlock_t *lock)
343 + {
344 +        *lock = 0;
345 + }
346 +
347 + static inline int spin_trylock(spinlock_t *lock)
348 + {
349 +        return !testandset(lock);
350 + }
351 + #endif
352 +
353   // Time data type for Time Manager emulation
354   #ifdef HAVE_CLOCK_GETTIME
355   typedef struct timespec tm_time_t;
# Line 122 | Line 357 | typedef struct timespec tm_time_t;
357   typedef struct timeval tm_time_t;
358   #endif
359  
360 + // Timing functions
361 + extern uint64 GetTicks_usec(void);
362 + extern void Delay_usec(uint32 usec);
363 +
364 + #if defined(HAVE_PTHREADS) || (defined(__linux__) && defined(__powerpc__))
365   // Setup pthread attributes
366   extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
367 + #endif
368  
369   // Various definitions
370   typedef struct rgb_color {
# Line 133 | Line 374 | typedef struct rgb_color {
374          uint8           alpha;
375   } rgb_color;
376  
377 + // X11 display fast locks
378 + #ifdef HAVE_SPINLOCKS
379 + #define X11_LOCK_TYPE spinlock_t
380 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
381 + #define XDisplayLock() spin_lock(&x_display_lock)
382 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
383 + #elif defined(HAVE_PTHREADS)
384 + #define X11_LOCK_TYPE pthread_mutex_t
385 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
386 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
387 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
388 + #else
389 + #define XDisplayLock()
390 + #define XDisplayUnlock()
391 + #endif
392 + #ifdef X11_LOCK_TYPE
393 + extern X11_LOCK_TYPE x_display_lock;
394 + #endif
395 +
396   // Macro for calling MacOS routines
397   #define CallMacOS(type, tvect) call_macos((uint32)tvect)
398   #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)

Diff Legend

- Removed lines
+ Added lines
< Changed lines (left side: revision 1.4)
> Changed lines (right side: revision 1.31)