ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/sysdeps.h
(Generate patch)

Comparing SheepShaver/src/Unix/sysdeps.h (file contents):
Revision 1.3 by gbeauche, 2003-01-04T12:23:39Z vs.
Revision 1.23 by gbeauche, 2004-01-26T13:52:31Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Linux
3   *
4 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
4 > *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 59 | Line 59
59   # endif
60   #endif
61  
62 < // Are we using a PPC emulator or the real thing?
63 < #ifdef __powerpc__
64 < #define EMULATED_PPC 0
65 < #else
66 < #define EMULATED_PPC 1
67 < #endif
62 > // Define for external components
63 > #define SHEEPSHAVER 1
64 >
65 > // Mac and host address space are the same
66 > #define REAL_ADDRESSING 1
67  
68   #define POWERPC_ROM 1
69  
70 + #if EMULATED_PPC
71 + // Handle interrupts asynchronously?
72 + #define ASYNC_IRQ 0
73 + // Mac ROM is write protected when banked memory is used
74 + #if REAL_ADDRESSING || DIRECT_ADDRESSING
75 + # define ROM_IS_WRITE_PROTECTED 0
76 + # define USE_SCRATCHMEM_SUBTERFUGE 1
77 + #else
78 + # define ROM_IS_WRITE_PROTECTED 1
79 + #endif
80 + // Configure PowerPC emulator
81 + #define PPC_CHECK_INTERRUPTS (ASYNC_IRQ ? 0 : 1)
82 + #define PPC_DECODE_CACHE 1
83 + #define PPC_FLIGHT_RECORDER 1
84 + #define PPC_PROFILE_COMPILE_TIME 0
85 + #define PPC_PROFILE_GENERIC_CALLS 0
86 + #define KPX_MAX_CPUS 1
87 + #else
88 + // Mac ROM is write protected
89 + #define ROM_IS_WRITE_PROTECTED 1
90 + #define USE_SCRATCHMEM_SUBTERFUGE 0
91 + #endif
92 +
93   // Data types
94   typedef unsigned char uint8;
95   typedef signed char int8;
# Line 112 | Line 134 | typedef int64 intptr;
134   #error "Unsupported size of pointer"
135   #endif
136  
137 + /**
138 + *              Helper functions to byteswap data
139 + **/
140 +
141 + #if defined(__GNUC__)
142 + #if defined(__x86_64__) || defined(__i386__)
143 + // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
144 + #define opt_bswap_32 do_opt_bswap_32
145 + static inline uint32 do_opt_bswap_32(uint32 x)
146 + {
147 +  uint32 v;
148 +  __asm__ __volatile__ ("bswap %0" : "=r" (v) : "0" (x));
149 +  return v;
150 + }
151 + #endif
152 + #endif
153 +
154 + #ifdef HAVE_BYTESWAP_H
155 + #include <byteswap.h>
156 + #endif
157 +
158 + #ifdef  opt_bswap_16
159 + #undef  bswap_16
160 + #define bswap_16 opt_bswap_16
161 + #endif
162 + #ifndef bswap_16
163 + #define bswap_16 generic_bswap_16
164 + #endif
165 +
166 + static inline uint16 generic_bswap_16(uint16 x)
167 + {
168 +  return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
169 + }
170 +
171 + #ifdef  opt_bswap_32
172 + #undef  bswap_32
173 + #define bswap_32 opt_bswap_32
174 + #endif
175 + #ifndef bswap_32
176 + #define bswap_32 generic_bswap_32
177 + #endif
178 +
179 + static inline uint32 generic_bswap_32(uint32 x)
180 + {
181 +  return (((x & 0xff000000) >> 24) |
182 +                  ((x & 0x00ff0000) >>  8) |
183 +                  ((x & 0x0000ff00) <<  8) |
184 +                  ((x & 0x000000ff) << 24) );
185 + }
186 +
187 + #if defined(__i386__)
188 + #define opt_bswap_64 do_opt_bswap_64
189 + static inline uint64 do_opt_bswap_64(uint64 x)
190 + {
191 +  return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
192 + }
193 + #endif
194 +
195 + #ifdef  opt_bswap_64
196 + #undef  bswap_64
197 + #define bswap_64 opt_bswap_64
198 + #endif
199 + #ifndef bswap_64
200 + #define bswap_64 generic_bswap_64
201 + #endif
202 +
203 + static inline uint64 generic_bswap_64(uint64 x)
204 + {
205 +  return (((x & UVAL64(0xff00000000000000)) >> 56) |
206 +                  ((x & UVAL64(0x00ff000000000000)) >> 40) |
207 +                  ((x & UVAL64(0x0000ff0000000000)) >> 24) |
208 +                  ((x & UVAL64(0x000000ff00000000)) >>  8) |
209 +                  ((x & UVAL64(0x00000000ff000000)) <<  8) |
210 +                  ((x & UVAL64(0x0000000000ff0000)) << 24) |
211 +                  ((x & UVAL64(0x000000000000ff00)) << 40) |
212 +                  ((x & UVAL64(0x00000000000000ff)) << 56) );
213 + }
214 +
215 + #ifdef WORDS_BIGENDIAN
216 + static inline uint16 tswap16(uint16 x) { return x; }
217 + static inline uint32 tswap32(uint32 x) { return x; }
218 + static inline uint64 tswap64(uint64 x) { return x; }
219 + #else
220 + static inline uint16 tswap16(uint16 x) { return bswap_16(x); }
221 + static inline uint32 tswap32(uint32 x) { return bswap_32(x); }
222 + static inline uint64 tswap64(uint64 x) { return bswap_64(x); }
223 + #endif
224 +
225 + // spin locks
226 + #ifdef __GNUC__
227 +
228 + #if defined(__powerpc__) || defined(__ppc__)
229 + #define HAVE_TEST_AND_SET 1
230 + static inline int testandset(volatile int *p)
231 + {
232 +        int ret;
233 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
234 +                                                 "      xor.    %0,%3,%0\n"
235 +                                                 "      bne             1f\n"
236 +                                                 "      stwcx.  %2,0,%1\n"
237 +                                                 "      bne-    0b\n"
238 +                                                 "1:    "
239 +                                                 : "=&r" (ret)
240 +                                                 : "r" (p), "r" (1), "r" (0)
241 +                                                 : "cr0", "memory");
242 +        return ret;
243 + }
244 + #endif
245 +
246 + #ifdef __i386__
247 + #define HAVE_TEST_AND_SET 1
248 + static inline int testandset(volatile int *p)
249 + {
250 +        int ret;
251 +        long int readval;
252 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
253 +        __asm__ __volatile__("xchgl %0, %1"
254 +                                                 : "=r" (ret), "=m" (*p), "=a" (readval)
255 +                                                 : "0" (1), "m" (*p)
256 +                                                 : "memory");
257 +        return ret;
258 + }
259 + #endif
260 +
261 + #ifdef __s390__
262 + #define HAVE_TEST_AND_SET 1
263 + static inline int testandset(volatile int *p)
264 + {
265 +        int ret;
266 +
267 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
268 +                                                 "   jl    0b"
269 +                                                 : "=&d" (ret)
270 +                                                 : "r" (1), "a" (p), "0" (*p)
271 +                                                 : "cc", "memory" );
272 +        return ret;
273 + }
274 + #endif
275 +
276 + #ifdef __alpha__
277 + #define HAVE_TEST_AND_SET 1
278 + static inline int testandset(volatile int *p)
279 + {
280 +        int ret;
281 +        unsigned long one;
282 +
283 +        __asm__ __volatile__("0:        mov 1,%2\n"
284 +                                                 "      ldl_l %0,%1\n"
285 +                                                 "      stl_c %2,%1\n"
286 +                                                 "      beq %2,1f\n"
287 +                                                 ".subsection 2\n"
288 +                                                 "1:    br 0b\n"
289 +                                                 ".previous"
290 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
291 +                                                 : "m" (*p));
292 +        return ret;
293 + }
294 + #endif
295 +
296 + #ifdef __sparc__
297 + #define HAVE_TEST_AND_SET 1
298 + static inline int testandset(volatile int *p)
299 + {
300 +        int ret;
301 +
302 +        __asm__ __volatile__("ldstub    [%1], %0"
303 +                                                 : "=r" (ret)
304 +                                                 : "r" (p)
305 +                                                 : "memory");
306 +
307 +        return (ret ? 1 : 0);
308 + }
309 + #endif
310 +
311 + #ifdef __arm__
312 + #define HAVE_TEST_AND_SET 1
313 + static inline int testandset(volatile int *p)
314 + {
315 +        register unsigned int ret;
316 +        __asm__ __volatile__("swp %0, %1, [%2]"
317 +                                                 : "=r"(ret)
318 +                                                 : "0"(1), "r"(p));
319 +        
320 +        return ret;
321 + }
322 + #endif
323 +
324 + #endif /* __GNUC__ */
325 +
326 + #if HAVE_TEST_AND_SET
327 + #define HAVE_SPINLOCKS 1
328 + typedef volatile int spinlock_t;
329 +
330 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
331 +
332 + static inline void spin_lock(spinlock_t *lock)
333 + {
334 +        while (testandset(lock));
335 + }
336 +
337 + static inline void spin_unlock(spinlock_t *lock)
338 + {
339 +        *lock = 0;
340 + }
341 +
342 + static inline int spin_trylock(spinlock_t *lock)
343 + {
344 +        return !testandset(lock);
345 + }
346 + #endif
347 +
348   // Time data type for Time Manager emulation
349   #ifdef HAVE_CLOCK_GETTIME
350   typedef struct timespec tm_time_t;
# Line 119 | Line 352 | typedef struct timespec tm_time_t;
352   typedef struct timeval tm_time_t;
353   #endif
354  
355 + // Timing functions
356 + extern uint64 GetTicks_usec(void);
357 + extern void Delay_usec(uint32 usec);
358 +
359   // Setup pthread attributes
360   extern void Set_pthread_attr(pthread_attr_t *attr, int priority);
361  
# Line 130 | Line 367 | typedef struct rgb_color {
367          uint8           alpha;
368   } rgb_color;
369  
370 + // X11 display fast locks
371 + #ifdef HAVE_SPINLOCKS
372 + #define X11_LOCK_TYPE spinlock_t
373 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
374 + #define XDisplayLock() spin_lock(&x_display_lock)
375 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
376 + #elif defined(HAVE_PTHREADS)
377 + #define X11_LOCK_TYPE pthread_mutex_t
378 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
379 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
380 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
381 + #else
382 + #define XDisplayLock()
383 + #define XDisplayUnlock()
384 + #endif
385 + #ifdef X11_LOCK_TYPE
386 + extern X11_LOCK_TYPE x_display_lock;
387 + #endif
388 +
389   // Macro for calling MacOS routines
390   #define CallMacOS(type, tvect) call_macos((uint32)tvect)
391   #define CallMacOS1(type, tvect, arg1) call_macos1((uint32)tvect, (uint32)arg1)

Diff Legend

Removed lines
+ Added lines
< Changed lines (old revision, 1.3)
> Changed lines (new revision, 1.23)