ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/BasiliskII/src/Unix/sysdeps.h
(Generate patch)

Comparing BasiliskII/src/Unix/sysdeps.h (file contents):
Revision 1.7 by cebix, 1999-10-27T16:59:48Z vs.
Revision 1.35 by asvitkine, 2009-08-17T20:42:26Z

# Line 1 | Line 1
1   /*
2   *  sysdeps.h - System dependent definitions for Unix
3   *
4 < *  Basilisk II (C) 1997-1999 Christian Bauer
4 > *  Basilisk II (C) 1997-2008 Christian Bauer
5   *
6   *  This program is free software; you can redistribute it and/or modify
7   *  it under the terms of the GNU General Public License as published by
# Line 42 | Line 42
42   #include <stdio.h>
43   #include <stdlib.h>
44   #include <string.h>
45 < #include <pthread.h>
45 >
46 > #ifdef HAVE_PTHREADS
47 > # include <pthread.h>
48 > #endif
49  
50   #ifdef HAVE_FCNTL_H
51   # include <fcntl.h>
# Line 59 | Line 62
62   # endif
63   #endif
64  
65 + #if defined(__MACH__)
66 + #include <mach/clock.h>
67 + #endif
68 +
69 + #ifdef ENABLE_NATIVE_M68K
70 +
71 + /* Mac and host address space are the same */
72 + #define REAL_ADDRESSING 1
73  
74 < /* Are the Mac and the host address space the same? */
74 > /* Using 68k natively */
75 > #define EMULATED_68K 0
76 >
77 > /* Mac ROM is not write protected */
78 > #define ROM_IS_WRITE_PROTECTED 0
79 > #define USE_SCRATCHMEM_SUBTERFUGE 1
80 >
81 > #else
82 >
83 > /* Mac and host address space are distinct */
84 > #ifndef REAL_ADDRESSING
85   #define REAL_ADDRESSING 0
86 + #endif
87  
88 < /* Are we using a 68k emulator or the real thing? */
88 > /* Using 68k emulator */
89   #define EMULATED_68K 1
90  
91 < /* Is the Mac ROM write protected? */
92 < #define ROM_IS_WRITE_PROTECTED 1
91 > /* Does the m68k emulator use a prefetch buffer? */
92 > #define USE_PREFETCH_BUFFER 0
93 >
94 > /* Mac ROM is write protected when banked memory is used */
95 > #if REAL_ADDRESSING || DIRECT_ADDRESSING
96 > # define ROM_IS_WRITE_PROTECTED 0
97 > # define USE_SCRATCHMEM_SUBTERFUGE 1
98 > #else
99 > # define ROM_IS_WRITE_PROTECTED 1
100 > #endif
101 >
102 > #endif
103 >
104 > /* Direct Addressing requires Video on SEGV signals in plain X11 mode */
105 > #if DIRECT_ADDRESSING && (!ENABLE_VOSF && !USE_SDL_VIDEO)
106 > # undef  ENABLE_VOSF
107 > # define ENABLE_VOSF 1
108 > #endif
109  
110   /* ExtFS is supported */
111   #define SUPPORTS_EXTFS 1
112  
113 + /* BSD socket API supported */
114 + #define SUPPORTS_UDP_TUNNEL 1
115 +
116 + /* Use the CPU emulator to check for periodic tasks? */
117 + #ifdef HAVE_PTHREADS
118 + #define USE_PTHREADS_SERVICES
119 + #endif
120 + #if EMULATED_68K
121 + #if defined(__NetBSD__)
122 + #define USE_CPU_EMUL_SERVICES
123 + #endif
124 + #endif
125 + #ifdef USE_CPU_EMUL_SERVICES
126 + #undef USE_PTHREADS_SERVICES
127 + #endif
128 +
129 +
130   /* Data types */
131   typedef unsigned char uint8;
132   typedef signed char int8;
# Line 96 | Line 151 | typedef long int32;
151   #if SIZEOF_LONG == 8
152   typedef unsigned long uint64;
153   typedef long int64;
154 + #define VAL64(a) (a ## l)
155 + #define UVAL64(a) (a ## ul)
156   #elif SIZEOF_LONG_LONG == 8
157   typedef unsigned long long uint64;
158   typedef long long int64;
159 + #define VAL64(a) (a ## LL)
160 + #define UVAL64(a) (a ## uLL)
161   #else
162   #error "No 8 byte type, you lose."
163   #endif
164 + #if SIZEOF_VOID_P == 4
165 + typedef uint32 uintptr;
166 + typedef int32 intptr;
167 + #elif SIZEOF_VOID_P == 8
168 + typedef uint64 uintptr;
169 + typedef int64 intptr;
170 + #else
171 + #error "Unsupported size of pointer"
172 + #endif
173 +
174 + #ifndef HAVE_LOFF_T
175 + typedef off_t loff_t;
176 + #endif
177 + #ifndef HAVE_CADDR_T
178 + typedef char * caddr_t;
179 + #endif
180  
181   /* Time data type for Time Manager emulation */
182   #ifdef HAVE_CLOCK_GETTIME
183   typedef struct timespec tm_time_t;
184 + #elif defined(__MACH__)
185 + typedef mach_timespec_t tm_time_t;
186   #else
187   typedef struct timeval tm_time_t;
188   #endif
189  
190 < /* Offset Mac->Unix time in seconds */
191 < #define TIME_OFFSET 0x7c25b080
190 > /* Define codes for all the float formats that we know of.
191 > * Though we only handle IEEE format.  */
192 > #define UNKNOWN_FLOAT_FORMAT 0
193 > #define IEEE_FLOAT_FORMAT 1
194 > #define VAX_FLOAT_FORMAT 2
195 > #define IBM_FLOAT_FORMAT 3
196 > #define C4X_FLOAT_FORMAT 4
197  
198   /* UAE CPU data types */
199   #define uae_s8 int8
# Line 125 | Line 207 | typedef struct timeval tm_time_t;
207   typedef uae_u32 uaecptr;
208  
209   /* Alignment restrictions */
210 < #if defined(__i386__) || defined(__powerpc__) || defined(__m68k__)
210 > #if defined(__i386__) || defined(__powerpc__) || defined(__m68k__) || defined(__x86_64__)
211   # define CPU_CAN_ACCESS_UNALIGNED
212   #endif
213  
214 + /* Timing functions */
215 + extern uint64 GetTicks_usec(void);
216 + extern void Delay_usec(uint32 usec);
217 +
218 + /* Spinlocks */
219 + #ifdef __GNUC__
220 +
221 + #if defined(__powerpc__) || defined(__ppc__)
222 + #define HAVE_TEST_AND_SET 1
223 + static inline int testandset(volatile int *p)
224 + {
225 +        int ret;
226 +        __asm__ __volatile__("0:    lwarx       %0,0,%1\n"
227 +                                                 "      xor.    %0,%3,%0\n"
228 +                                                 "      bne             1f\n"
229 +                                                 "      stwcx.  %2,0,%1\n"
230 +                                                 "      bne-    0b\n"
231 +                                                 "1:    "
232 +                                                 : "=&r" (ret)
233 +                                                 : "r" (p), "r" (1), "r" (0)
234 +                                                 : "cr0", "memory");
235 +        return ret;
236 + }
237 + #endif
238 +
239 + /* FIXME: SheepShaver occasionally hangs with these locks */
240 + #if 0 && (defined(__i386__) || defined(__x86_64__))
241 + #define HAVE_TEST_AND_SET 1
242 + static inline int testandset(volatile int *p)
243 + {
244 +        long int ret;
245 +        /* Note: the "xchg" instruction does not need a "lock" prefix */
246 +        __asm__ __volatile__("xchgl %k0, %1"
247 +                                                 : "=r" (ret), "=m" (*p)
248 +                                                 : "0" (1), "m" (*p)
249 +                                                 : "memory");
250 +        return ret;
251 + }
252 + #endif
253 +
254 + #ifdef __s390__
255 + #define HAVE_TEST_AND_SET 1
256 + static inline int testandset(volatile int *p)
257 + {
258 +        int ret;
259 +
260 +        __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
261 +                                                 "   jl    0b"
262 +                                                 : "=&d" (ret)
263 +                                                 : "r" (1), "a" (p), "0" (*p)
264 +                                                 : "cc", "memory" );
265 +        return ret;
266 + }
267 + #endif
268 +
269 + #ifdef __alpha__
270 + #define HAVE_TEST_AND_SET 1
271 + static inline int testandset(volatile int *p)
272 + {
273 +        int ret;
274 +        unsigned long one;
275 +
276 +        __asm__ __volatile__("0:        mov 1,%2\n"
277 +                                                 "      ldl_l %0,%1\n"
278 +                                                 "      stl_c %2,%1\n"
279 +                                                 "      beq %2,1f\n"
280 +                                                 ".subsection 2\n"
281 +                                                 "1:    br 0b\n"
282 +                                                 ".previous"
283 +                                                 : "=r" (ret), "=m" (*p), "=r" (one)
284 +                                                 : "m" (*p));
285 +        return ret;
286 + }
287 + #endif
288 +
289 + #ifdef __sparc__
290 + #define HAVE_TEST_AND_SET 1
291 + static inline int testandset(volatile int *p)
292 + {
293 +        int ret;
294 +
295 +        __asm__ __volatile__("ldstub    [%1], %0"
296 +                                                 : "=r" (ret)
297 +                                                 : "r" (p)
298 +                                                 : "memory");
299 +
300 +        return (ret ? 1 : 0);
301 + }
302 + #endif
303 +
304 + #ifdef __arm__
305 + #define HAVE_TEST_AND_SET 1
306 + static inline int testandset(volatile int *p)
307 + {
308 +        register unsigned int ret;
309 +        __asm__ __volatile__("swp %0, %1, [%2]"
310 +                                                 : "=r"(ret)
311 +                                                 : "0"(1), "r"(p));
312 +        
313 +        return ret;
314 + }
315 + #endif
316 +
317 + #endif /* __GNUC__ */
318 +
319 + typedef volatile int spinlock_t;
320 +
321 + static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
322 +
323 + #if HAVE_TEST_AND_SET
324 + #define HAVE_SPINLOCKS 1
325 + static inline void spin_lock(spinlock_t *lock)
326 + {
327 +        while (testandset(lock));
328 + }
329 +
330 + static inline void spin_unlock(spinlock_t *lock)
331 + {
332 +        *lock = 0;
333 + }
334 +
335 + static inline int spin_trylock(spinlock_t *lock)
336 + {
337 +        return !testandset(lock);
338 + }
339 + #else
340 + static inline void spin_lock(spinlock_t *lock)
341 + {
342 + }
343 +
344 + static inline void spin_unlock(spinlock_t *lock)
345 + {
346 + }
347 +
348 + static inline int spin_trylock(spinlock_t *lock)
349 + {
350 +        return 1;
351 + }
352 + #endif
353 +
354 + /* X11 display fast locks */
355 + #ifdef HAVE_SPINLOCKS
356 + #define X11_LOCK_TYPE spinlock_t
357 + #define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
358 + #define XDisplayLock() spin_lock(&x_display_lock)
359 + #define XDisplayUnlock() spin_unlock(&x_display_lock)
360 + #elif defined(HAVE_PTHREADS)
361 + #define X11_LOCK_TYPE pthread_mutex_t
362 + #define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
363 + #define XDisplayLock() pthread_mutex_lock(&x_display_lock);
364 + #define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
365 + #else
366 + #define XDisplayLock()
367 + #define XDisplayUnlock()
368 + #endif
369 + #ifdef X11_LOCK_TYPE
370 + extern X11_LOCK_TYPE x_display_lock;
371 + #endif
372 +
373 + #ifdef HAVE_PTHREADS
374 + /* Centralized pthread attribute setup */
375 + void Set_pthread_attr(pthread_attr_t *attr, int priority);
376 + #endif
377 +
378   /* UAE CPU defines */
379   #ifdef WORDS_BIGENDIAN
380  
# Line 169 | Line 415 | static inline void do_put_mem_word(uae_u
415  
416   #else /* WORDS_BIGENDIAN */
417  
418 < #ifdef __i386__
418 > #if defined(__i386__) || defined(__x86_64__)
419  
420   /* Intel x86 */
421   #define X86_PPRO_OPT
# Line 187 | Line 433 | static inline void do_put_mem_word(uae_u
433   #else
434   static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
435   #endif
436 + #define HAVE_OPTIMIZED_BYTESWAP_32
437 + /* bswap doesn't affect condition codes */
438 + static inline uae_u32 do_byteswap_32(uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v)); return v;}
439 + #define HAVE_OPTIMIZED_BYTESWAP_16
440 + #ifdef X86_PPRO_OPT
441 + static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); return v;}
442 + #else
443 + static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); return v;}
444 + #endif
445  
446   #elif defined(CPU_CAN_ACCESS_UNALIGNED)
447  
# Line 208 | Line 463 | static inline void do_put_mem_word(uae_u
463  
464   #endif /* WORDS_BIGENDIAN */
465  
466 + #ifndef HAVE_OPTIMIZED_BYTESWAP_32
467 + static inline uae_u32 do_byteswap_32(uae_u32 v)
468 +        { return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v & 0xff) << 24) | ((v & 0xff00) << 8)); }
469 + #endif
470 +
471 + #ifndef HAVE_OPTIMIZED_BYTESWAP_16
472 + static inline uae_u32 do_byteswap_16(uae_u32 v)
473 +        { return (((v >> 8) & 0xff) | ((v & 0xff) << 8)); }
474 + #endif
475 +
476   #define do_get_mem_byte(a) ((uae_u32)*((uae_u8 *)(a)))
477   #define do_put_mem_byte(a, v) (*(uae_u8 *)(a) = (v))
478  
# Line 215 | Line 480 | static inline void do_put_mem_word(uae_u
480   #define call_mem_put_func(func, addr, v) ((*func)(addr, v))
481   #define __inline__ inline
482   #define CPU_EMU_SIZE 0
218 #undef USE_MAPPED_MEMORY
219 #undef CAN_MAP_MEMORY
483   #undef NO_INLINE_MEMORY_ACCESS
484   #undef MD_HAVE_MEM_1_FUNCS
222 #undef USE_COMPILER
485   #define ENUMDECL typedef enum
486   #define ENUMNAME(name) name
487   #define write_log printf
488  
489 < #ifdef X86_ASSEMBLY
489 > #if defined(X86_ASSEMBLY) || defined(X86_64_ASSEMBLY)
490   #define ASM_SYM_FOR_FUNC(a) __asm__(a)
491   #else
492   #define ASM_SYM_FOR_FUNC(a)

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)