
Comparing BasiliskII/src/Unix/sysdeps.h (file contents):
Revision 1.12 by cebix, 2000-07-22T18:12:34Z vs.
Revision 1.32 by gbeauche, 2005-01-30T21:42:14Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Unix
3   *
4 < *  Basilisk II (C) 1997-2000 Christian Bauer
4 > *  Basilisk II (C) 1997-2005 Christian Bauer
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 43 | Line 43
43   #include <stdlib.h>
44   #include <string.h>
45  
46 + #ifdef HAVE_PTHREADS
47 + # include <pthread.h>
48 + #endif
49 +
50   #ifdef HAVE_FCNTL_H
51   # include <fcntl.h>
52   #endif
# Line 69 | Line 73
73  
74   /* Mac ROM is not write protected */
75   #define ROM_IS_WRITE_PROTECTED 0
76 + #define USE_SCRATCHMEM_SUBTERFUGE 1
77  
78   #else
79  
80   /* Mac and host address space are distinct */
81 + #ifndef REAL_ADDRESSING
82   #define REAL_ADDRESSING 0
83 + #endif
84  
85   /* Using 68k emulator */
86   #define EMULATED_68K 1
87  
88 < /* Mac ROM is write protected */
89 < #define ROM_IS_WRITE_PROTECTED 1
88 > /* Does the m68k emulator use a prefetch buffer? */
89 > #define USE_PREFETCH_BUFFER 0
90  
91 + /* Mac ROM is write protected when banked memory is used */
92 + #if REAL_ADDRESSING || DIRECT_ADDRESSING
93 + # define ROM_IS_WRITE_PROTECTED 0
94 + # define USE_SCRATCHMEM_SUBTERFUGE 1
95 + #else
96 + # define ROM_IS_WRITE_PROTECTED 1
97 + #endif
98 +
99 + #endif
100 +
101 + /* Direct Addressing requires Video on SEGV signals in plain X11 mode */
102 + #if DIRECT_ADDRESSING && (!ENABLE_VOSF && !USE_SDL_VIDEO)
103 + # undef  ENABLE_VOSF
104 + # define ENABLE_VOSF 1
105   #endif
106  
107   /* ExtFS is supported */
108   #define SUPPORTS_EXTFS 1
109  
110 + /* BSD socket API supported */
111 + #define SUPPORTS_UDP_TUNNEL 1
112 +
113  
114   /* Data types */
115   typedef unsigned char uint8;
# Line 121 | Line 145 | typedef long long int64;
145   #else
146   #error "No 8 byte type, you lose."
147   #endif
148 + #if SIZEOF_VOID_P == 4
149 + typedef uint32 uintptr;
150 + typedef int32 intptr;
151 + #elif SIZEOF_VOID_P == 8
152 + typedef uint64 uintptr;
153 + typedef int64 intptr;
154 + #else
155 + #error "Unsupported size of pointer"
156 + #endif
157 +
158 + #ifndef HAVE_LOFF_T
159 + typedef off_t loff_t;
160 + #endif
161 + #ifndef HAVE_CADDR_T
162 + typedef char * caddr_t;
163 + #endif
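
The exact-width typedefs above depend on configure-time SIZEOF_* probes. A build could verify them at compile time; a minimal sketch (not part of this header), using the negative-array-size trick since static_assert postdates this code:

	/* Each typedef fails to compile if the configured width is wrong. */
	typedef char assert_uint32_width [(sizeof(uint32)  == 4) ? 1 : -1];
	typedef char assert_uint64_width [(sizeof(uint64)  == 8) ? 1 : -1];
	typedef char assert_uintptr_width[(sizeof(uintptr) == sizeof(void *)) ? 1 : -1];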
164  
165   /* Time data type for Time Manager emulation */
166   #ifdef HAVE_CLOCK_GETTIME
# Line 129 | Line 169 | typedef struct timespec tm_time_t;
169   typedef struct timeval tm_time_t;
170   #endif
171  
172 < /* Offset Mac->Unix time in seconds */
173 < #define TIME_OFFSET 0x7c25b080
172 > /* Define codes for all the float formats that we know of,
173 >  * though we only handle the IEEE format.  */
174 > #define UNKNOWN_FLOAT_FORMAT 0
175 > #define IEEE_FLOAT_FORMAT 1
176 > #define VAX_FLOAT_FORMAT 2
177 > #define IBM_FLOAT_FORMAT 3
178 > #define C4X_FLOAT_FORMAT 4
179  
180   /* UAE CPU data types */
181   #define uae_s8 int8
# Line 144 | Line 189 | typedef struct timeval tm_time_t;
189   typedef uae_u32 uaecptr;
190  
191   /* Alignment restrictions */
192 < #if defined(__i386__) || defined(__powerpc__) || defined(__m68k__)
192 > #if defined(__i386__) || defined(__powerpc__) || defined(__m68k__) || defined(__x86_64__)
193   # define CPU_CAN_ACCESS_UNALIGNED
194   #endif
195  
# Line 152 | Line 197 | typedef uae_u32 uaecptr;
197   extern uint64 GetTicks_usec(void);
198   extern void Delay_usec(uint32 usec);
199  
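GetTicks_usec() and Delay_usec() are implemented elsewhere in the Unix port; a usage sketch (the function and values are illustrative, not from the source):

	#include <stdio.h>

	static void time_a_delay(void)
	{
		uint64 start = GetTicks_usec();
		Delay_usec(10000);                              /* ask for a ~10 ms sleep */
		uint64 elapsed = GetTicks_usec() - start;       /* actual time slept, in us */
		printf("slept %llu us\n", (unsigned long long)elapsed);
	}
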
200 + /* Spinlocks */
201 + #ifdef __GNUC__
202 +
203 + #if defined(__powerpc__) || defined(__ppc__)
204 + #define HAVE_TEST_AND_SET 1
205 + static inline int testandset(volatile int *p)
206 + {
207 +        int ret;
208 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
209 +                                                 "      xor.    %0,%3,%0\n"
210 +                                                 "      bne             1f\n"
211 +                                                 "      stwcx.  %2,0,%1\n"
212 +                                                 "      bne-    0b\n"
213 +                                                 "1:    "
214 +                                                 : "=&r" (ret)
215 +                                                 : "r" (p), "r" (1), "r" (0)
216 +                                                 : "cr0", "memory");
217 +        return ret;
218 + }
219 + #endif
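
Each testandset() variant in this block implements the same contract: atomically store 1 to *p and return the previous value, so a nonzero result means the lock was already held. On compilers with the GCC __sync builtins (which postdate this revision) the same contract could be written portably; a hypothetical sketch:

	static inline int testandset_portable(volatile int *p)
	{
		/* atomically stores 1 and returns the old value of *p */
		return __sync_lock_test_and_set(p, 1);
	}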
220 +
221 + /* FIXME: SheepShaver occasionally hangs with these locks */
222 + #if 0 && (defined(__i386__) || defined(__x86_64__))
223 + #define HAVE_TEST_AND_SET 1
224 + static inline int testandset(volatile int *p)
225 + {
226 +        long int ret;
227 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
228 +        __asm__ __volatile__("xchgl %k0, %1"
229 +                                                 : "=r" (ret), "=m" (*p)
230 +                                                 : "0" (1), "m" (*p)
231 +                                                 : "memory");
232 +        return ret;
233 + }
234 + #endif
235 +
236 + #ifdef __s390__
237 + #define HAVE_TEST_AND_SET 1
238 + static inline int testandset(volatile int *p)
239 + {
240 +        int ret;
241 +
242 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
243 +                                                 "   jl    0b"
244 +                                                 : "=&d" (ret)
245 +                                                 : "r" (1), "a" (p), "0" (*p)
246 +                                                 : "cc", "memory" );
247 +        return ret;
248 + }
249 + #endif
250 +
251 + #ifdef __alpha__
252 + #define HAVE_TEST_AND_SET 1
253 + static inline int testandset(volatile int *p)
254 + {
255 +        int ret;
256 +        unsigned long one;
257 +
258 +        __asm__ __volatile__("0:        mov 1,%2\n"
259 +                                                 "      ldl_l %0,%1\n"
260 +                                                 "      stl_c %2,%1\n"
261 +                                                 "      beq %2,1f\n"
262 +                                                 ".subsection 2\n"
263 +                                                 "1:    br 0b\n"
264 +                                                 ".previous"
265 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
266 +                                                 : "m" (*p));
267 +        return ret;
268 + }
269 + #endif
270 +
271 + #ifdef __sparc__
272 + #define HAVE_TEST_AND_SET 1
273 + static inline int testandset(volatile int *p)
274 + {
275 +        int ret;
276 +
277 +        __asm__ __volatile__("ldstub    [%1], %0"
278 +                                                 : "=r" (ret)
279 +                                                 : "r" (p)
280 +                                                 : "memory");
281 +
282 +        return (ret ? 1 : 0);
283 + }
284 + #endif
285 +
286 + #ifdef __arm__
287 + #define HAVE_TEST_AND_SET 1
288 + static inline int testandset(volatile int *p)
289 + {
290 +        register unsigned int ret;
291 +        __asm__ __volatile__("swp %0, %1, [%2]"
292 +                                                 : "=r"(ret)
293 +                                                 : "0"(1), "r"(p));
294 +        
295 +        return ret;
296 + }
297 + #endif
298 +
299 + #endif /* __GNUC__ */
300 +
301 + typedef volatile int spinlock_t;
302 +
303 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
304 +
305 + #if HAVE_TEST_AND_SET
306 + #define HAVE_SPINLOCKS 1
307 + static inline void spin_lock(spinlock_t *lock)
308 + {
309 +        while (testandset(lock));
310 + }
311 +
312 + static inline void spin_unlock(spinlock_t *lock)
313 + {
314 +        *lock = 0;
315 + }
316 +
317 + static inline int spin_trylock(spinlock_t *lock)
318 + {
319 +        return !testandset(lock);
320 + }
321 + #else
322 + static inline void spin_lock(spinlock_t *lock)
323 + {
324 + }
325 +
326 + static inline void spin_unlock(spinlock_t *lock)
327 + {
328 + }
329 +
330 + static inline int spin_trylock(spinlock_t *lock)
331 + {
332 +        return 1;
333 + }
334 + #endif
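
A usage sketch of the spinlock API above (the lock and counter names are made up): spin_lock() busy-waits until testandset() observes 0, and spin_unlock() releases with a plain store.

	static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
	static int frame_counter;

	static void bump_frame_counter(void)
	{
		spin_lock(&counter_lock);
		frame_counter++;
		spin_unlock(&counter_lock);
	}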
335 +
336 + /* X11 display fast locks */
337 + #ifdef HAVE_SPINLOCKS
338 + #define X11_LOCK_TYPE spinlock_t
339 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
340 + #define XDisplayLock() spin_lock(&x_display_lock)
341 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
342 + #elif defined(HAVE_PTHREADS)
343 + #define X11_LOCK_TYPE pthread_mutex_t
344 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
345 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock)
346 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock)
347 + #else
348 + #define XDisplayLock()
349 + #define XDisplayUnlock()
350 + #endif
351 + #ifdef X11_LOCK_TYPE
352 + extern X11_LOCK_TYPE x_display_lock;
353 + #endif
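
Whichever branch defines the macros, call sites look identical; a sketch (XSync is just an example of Xlib traffic that must hold the lock):

	static void sync_display(Display *d)    /* Display comes from <X11/Xlib.h> */
	{
		XDisplayLock();
		XSync(d, False);
		XDisplayUnlock();
	}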
354 +
355 + #ifdef HAVE_PTHREADS
356 + /* Centralized pthread attribute setup */
357 + void Set_pthread_attr(pthread_attr_t *attr, int priority);
358 + #endif
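
A hypothetical caller of Set_pthread_attr() (the thread function and priority value are placeholders, not from the source):

	#ifdef HAVE_PTHREADS
	static void *tick_func(void *arg);      /* hypothetical worker */

	static bool start_tick_thread(pthread_t *thread)
	{
		pthread_attr_t attr;
		pthread_attr_init(&attr);
		Set_pthread_attr(&attr, 0);     /* 0 is a placeholder priority */
		return pthread_create(thread, &attr, tick_func, NULL) == 0;
	}
	#endif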
359 +
360   /* UAE CPU defines */
361   #ifdef WORDS_BIGENDIAN
362  
# Line 192 | Line 397 | static inline void do_put_mem_word(uae_u
397  
398   #else /* WORDS_BIGENDIAN */
399  
400 < #ifdef __i386__
400 > #if defined(__i386__) || defined(__x86_64__)
401  
402   /* Intel x86 */
403   #define X86_PPRO_OPT
# Line 210 | Line 415 | static inline void do_put_mem_word(uae_u
415   #else
416   static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
417   #endif
418 + #define HAVE_OPTIMIZED_BYTESWAP_32
419 + /* bswap doesn't affect condition codes */
420 + static inline uae_u32 do_byteswap_32(uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v)); return v;}
421 + #define HAVE_OPTIMIZED_BYTESWAP_16
422 + #ifdef X86_PPRO_OPT
423 + static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); return v;}
424 + #else
425 + static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); return v;}
426 + #endif
427  
428   #elif defined(CPU_CAN_ACCESS_UNALIGNED)
429  
# Line 231 | Line 445 | static inline void do_put_mem_word(uae_u
445  
446   #endif /* WORDS_BIGENDIAN */
447  
448 + #ifndef HAVE_OPTIMIZED_BYTESWAP_32
449 + static inline uae_u32 do_byteswap_32(uae_u32 v)
450 +        { return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v & 0xff) << 24) | ((v & 0xff00) << 8)); }
451 + #endif
452 +
453 + #ifndef HAVE_OPTIMIZED_BYTESWAP_16
454 + static inline uae_u32 do_byteswap_16(uae_u32 v)
455 +        { return (((v >> 8) & 0xff) | ((v & 0xff) << 8)); }
456 + #endif
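
A worked check of the byteswap contract (illustrative; the asm and portable versions must agree):

	#include <assert.h>

	static void check_byteswap(void)
	{
		assert(do_byteswap_32(0x11223344) == 0x44332211);
		assert(do_byteswap_16(0x1122) == 0x2211);
	}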
457 +
458   #define do_get_mem_byte(a) ((uae_u32)*((uae_u8 *)(a)))
459   #define do_put_mem_byte(a, v) (*(uae_u8 *)(a) = (v))
460  
# Line 244 | Line 468 | static inline void do_put_mem_word(uae_u
468   #define ENUMNAME(name) name
469   #define write_log printf
470  
471 < #ifdef USE_COMPILER
248 < #define USE_MAPPED_MEMORY
249 < #define CAN_MAP_MEMORY
250 < #define NO_EXCEPTION_3
251 < #define NO_PREFETCH_BUFFER
252 < #else
253 < #undef USE_MAPPED_MEMORY
254 < #undef CAN_MAP_MEMORY
255 < #endif
256 <
257 < #ifdef X86_ASSEMBLY
471 > #if defined(X86_ASSEMBLY) || defined(X86_64_ASSEMBLY)
472   #define ASM_SYM_FOR_FUNC(a) __asm__(a)
473   #else
474   #define ASM_SYM_FOR_FUNC(a)

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines