/*
 * sysdeps.h - System dependent definitions for Unix
 *
 * Basilisk II (C) 1997-2008 Christian Bauer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef SYSDEPS_H
#define SYSDEPS_H

#ifndef __STDC__
#error "Your compiler is not ANSI. Get a real one."
#endif

#include "config.h"
#include "user_strings_unix.h"

#ifndef STDC_HEADERS
#error "You don't have ANSI C header files."
#endif

#ifdef HAVE_UNISTD_H
# include <sys/types.h>
# include <unistd.h>
#endif

#include <netinet/in.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_PTHREADS
# include <pthread.h>
#endif

#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif

#ifdef TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
#else
# ifdef HAVE_SYS_TIME_H
#  include <sys/time.h>
# else
#  include <time.h>
# endif
#endif

#if defined(__MACH__)
#include <mach/clock.h>
#endif

#ifdef ENABLE_NATIVE_M68K

/* Mac and host address space are the same */
#define REAL_ADDRESSING 1

/* Using 68k natively */
#define EMULATED_68K 0

/* Mac ROM is not write protected */
#define ROM_IS_WRITE_PROTECTED 0
#define USE_SCRATCHMEM_SUBTERFUGE 1

#else

/* Mac and host address space are distinct */
#ifndef REAL_ADDRESSING
#define REAL_ADDRESSING 0
#endif

/* Using 68k emulator */
#define EMULATED_68K 1

/* Does the m68k emulator use a prefetch buffer? */
#define USE_PREFETCH_BUFFER 0

/* Mac ROM is write protected when banked memory is used */
#if REAL_ADDRESSING || DIRECT_ADDRESSING
# define ROM_IS_WRITE_PROTECTED 0
# define USE_SCRATCHMEM_SUBTERFUGE 1
#else
# define ROM_IS_WRITE_PROTECTED 1
#endif

#endif

/* Direct addressing requires VOSF (video on SEGV signals) in plain X11 mode */
#if DIRECT_ADDRESSING && (!ENABLE_VOSF && !USE_SDL_VIDEO)
# undef ENABLE_VOSF
# define ENABLE_VOSF 1
#endif

/* ExtFS is supported */
#define SUPPORTS_EXTFS 1

/* BSD socket API supported */
#define SUPPORTS_UDP_TUNNEL 1

/* Use the CPU emulator to check for periodic tasks? */
#ifdef HAVE_PTHREADS
#define USE_PTHREADS_SERVICES
#endif
#if EMULATED_68K
#if defined(__NetBSD__)
#define USE_CPU_EMUL_SERVICES
#endif
#endif
#ifdef USE_CPU_EMUL_SERVICES
#undef USE_PTHREADS_SERVICES
#endif

/* Data types */
typedef unsigned char uint8;
typedef signed char int8;
#if SIZEOF_SHORT == 2
typedef unsigned short uint16;
typedef short int16;
#elif SIZEOF_INT == 2
typedef unsigned int uint16;
typedef int int16;
#else
#error "No 2 byte type, you lose."
#endif
#if SIZEOF_INT == 4
typedef unsigned int uint32;
typedef int int32;
#elif SIZEOF_LONG == 4
typedef unsigned long uint32;
typedef long int32;
#else
#error "No 4 byte type, you lose."
#endif
#if SIZEOF_LONG == 8
typedef unsigned long uint64;
typedef long int64;
#define VAL64(a) (a ## l)
#define UVAL64(a) (a ## ul)
#elif SIZEOF_LONG_LONG == 8
typedef unsigned long long uint64;
typedef long long int64;
#define VAL64(a) (a ## LL)
#define UVAL64(a) (a ## uLL)
#else
#error "No 8 byte type, you lose."
#endif
#if SIZEOF_VOID_P == 4
typedef uint32 uintptr;
typedef int32 intptr;
#elif SIZEOF_VOID_P == 8
typedef uint64 uintptr;
typedef int64 intptr;
#else
#error "Unsupported size of pointer"
#endif
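
/*
 * Usage note (illustrative; the variable name is just an example): VAL64 and
 * UVAL64 attach the 64-bit literal suffix matching whichever 8-byte type was
 * selected above, so constants wider than 32 bits stay portable:
 *
 *     uint64 frame_mask = UVAL64(0xffffffff00000000);
 */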

#ifndef HAVE_LOFF_T
typedef off_t loff_t;
#endif
#ifndef HAVE_CADDR_T
typedef char * caddr_t;
#endif

/* Time data type for Time Manager emulation */
#ifdef HAVE_CLOCK_GETTIME
typedef struct timespec tm_time_t;
#elif defined(__MACH__)
typedef mach_timespec_t tm_time_t;
#else
typedef struct timeval tm_time_t;
#endif
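
/*
 * Illustrative sketch (not part of this header; the actual Time Manager code
 * lives in the platform sources): how a current-time reading maps onto
 * tm_time_t under each branch above. "host_clock" is a placeholder for a
 * clock_serv_t obtained elsewhere.
 *
 *     tm_time_t now;
 *     #ifdef HAVE_CLOCK_GETTIME
 *     clock_gettime(CLOCK_REALTIME, &now);    // struct timespec
 *     #elif defined(__MACH__)
 *     clock_get_time(host_clock, &now);       // mach_timespec_t
 *     #else
 *     gettimeofday(&now, NULL);               // struct timeval
 *     #endif
 */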

/* Define codes for all the float formats that we know of.
 * Though we only handle IEEE format. */
#define UNKNOWN_FLOAT_FORMAT 0
#define IEEE_FLOAT_FORMAT 1
#define VAX_FLOAT_FORMAT 2
#define IBM_FLOAT_FORMAT 3
#define C4X_FLOAT_FORMAT 4

/* UAE CPU data types */
#define uae_s8 int8
#define uae_u8 uint8
#define uae_s16 int16
#define uae_u16 uint16
#define uae_s32 int32
#define uae_u32 uint32
#define uae_s64 int64
#define uae_u64 uint64
typedef uae_u32 uaecptr;

/* Alignment restrictions */
#if defined(__i386__) || defined(__powerpc__) || defined(__m68k__) || defined(__x86_64__)
# define CPU_CAN_ACCESS_UNALIGNED
#endif

/* Timing functions */
extern uint64 GetTicks_usec(void);
extern void Delay_usec(uint32 usec);
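
/*
 * Usage sketch (illustrative only): GetTicks_usec() returns a microsecond
 * tick count and Delay_usec() sleeps for roughly the given number of
 * microseconds; both are implemented elsewhere in the Unix port.
 *
 *     uint64 start = GetTicks_usec();
 *     Delay_usec(16625);                      // roughly one 60.15 Hz tick
 *     uint64 elapsed_usec = GetTicks_usec() - start;
 */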

/* Spinlocks */
#ifdef __GNUC__

#if defined(__powerpc__) || defined(__ppc__)
#define HAVE_TEST_AND_SET 1
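/* Load-linked/store-conditional loop: lwarx reads *p with a reservation and
 * stwcx. stores 1 only if the reservation still holds, retrying on failure.
 * Returns 0 when the lock was free (and is now taken), non-zero otherwise. */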
static inline int testandset(volatile int *p)
{
    int ret;
    __asm__ __volatile__("0: lwarx %0,0,%1\n"
                         "   xor. %0,%3,%0\n"
                         "   bne 1f\n"
                         "   stwcx. %2,0,%1\n"
                         "   bne- 0b\n"
                         "1: "
                         : "=&r" (ret)
                         : "r" (p), "r" (1), "r" (0)
                         : "cr0", "memory");
    return ret;
}
#endif

/* FIXME: SheepShaver occasionally hangs with those locks */
#if 0 && (defined(__i386__) || defined(__x86_64__))
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    long int ret;
    /* Note: the "xchg" instruction does not need a "lock" prefix */
    __asm__ __volatile__("xchgl %k0, %1"
                         : "=r" (ret), "=m" (*p)
                         : "0" (1), "m" (*p)
                         : "memory");
    return ret;
}
#endif

#ifdef __s390__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;

    __asm__ __volatile__("0: cs %0,%1,0(%2)\n"
                         "   jl 0b"
                         : "=&d" (ret)
                         : "r" (1), "a" (p), "0" (*p)
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __alpha__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__("0: mov 1,%2\n"
                         "   ldl_l %0,%1\n"
                         "   stl_c %2,%1\n"
                         "   beq %2,1f\n"
                         ".subsection 2\n"
                         "1: br 0b\n"
                         ".previous"
                         : "=r" (ret), "=m" (*p), "=r" (one)
                         : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r" (ret)
                         : "0" (1), "r" (p));

    return ret;
}
#endif

#endif /* __GNUC__ */

typedef volatile int spinlock_t;

static const spinlock_t SPIN_LOCK_UNLOCKED = 0;

#if HAVE_TEST_AND_SET
#define HAVE_SPINLOCKS 1
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
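
/*
 * Usage sketch (illustrative; the lock and counter names are examples only):
 *
 *     static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
 *     static int shared_counter;
 *
 *     spin_lock(&counter_lock);
 *     shared_counter++;
 *     spin_unlock(&counter_lock);
 *
 * Note that on platforms without a testandset() implementation the functions
 * above compile to no-ops, so callers must not rely on them for correctness
 * when HAVE_SPINLOCKS is undefined.
 */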

/* X11 display fast locks */
#ifdef HAVE_SPINLOCKS
#define X11_LOCK_TYPE spinlock_t
#define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
#define XDisplayLock() spin_lock(&x_display_lock)
#define XDisplayUnlock() spin_unlock(&x_display_lock)
#elif defined(HAVE_PTHREADS)
#define X11_LOCK_TYPE pthread_mutex_t
#define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define XDisplayLock() pthread_mutex_lock(&x_display_lock)
#define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock)
#else
#define XDisplayLock()
#define XDisplayUnlock()
#endif
#ifdef X11_LOCK_TYPE
extern X11_LOCK_TYPE x_display_lock;
#endif
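
/*
 * Usage sketch (illustrative only; "display" is a placeholder for whatever
 * Display pointer the caller holds): serialize Xlib calls that may be issued
 * from more than one thread.
 *
 *     XDisplayLock();
 *     XFlush(display);
 *     XDisplayUnlock();
 */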

#ifdef HAVE_PTHREADS
/* Centralized pthread attribute setup */
void Set_pthread_attr(pthread_attr_t *attr, int priority);
#endif
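
/*
 * Usage sketch (illustrative; thread_func and the priority value 0 are
 * examples, not fixed by this header): initialize an attribute object, let
 * Set_pthread_attr() apply the port's scheduling settings, then create the
 * thread with it.
 *
 *     pthread_attr_t attr;
 *     pthread_t thread;
 *     pthread_attr_init(&attr);
 *     Set_pthread_attr(&attr, 0);
 *     pthread_create(&thread, &attr, thread_func, NULL);
 */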

/* UAE CPU defines */
#ifdef WORDS_BIGENDIAN

#ifdef CPU_CAN_ACCESS_UNALIGNED

/* Big-endian CPUs which can do unaligned accesses */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {return *a;}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {return *a;}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = v;}

#else /* CPU_CAN_ACCESS_UNALIGNED */

#ifdef sgi
/* The SGI MIPSPro compilers can do unaligned accesses given enough hints.
 * They will automatically inline these routines. */
#ifdef __cplusplus
extern "C" { /* only the C compiler does unaligned accesses */
#endif
extern uae_u32 do_get_mem_long(uae_u32 *a);
extern uae_u32 do_get_mem_word(uae_u16 *a);
extern void do_put_mem_long(uae_u32 *a, uae_u32 v);
extern void do_put_mem_word(uae_u16 *a, uae_u32 v);
#ifdef __cplusplus
}
#endif

#else /* sgi */

/* Big-endian CPUs which can not do unaligned accesses (this is not the most efficient way to do this...) */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}
#endif /* sgi */

#endif /* CPU_CAN_ACCESS_UNALIGNED */
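
/*
 * Worked example for the byte-by-byte accessors above: if the four bytes at
 * address a are 0x12 0x34 0x56 0x78 (Mac big-endian order), do_get_mem_long()
 * assembles (0x12 << 24) | (0x34 << 16) | (0x56 << 8) | 0x78 = 0x12345678,
 * and do_put_mem_long(a, 0x12345678) writes the bytes back in the same order.
 */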

#else /* WORDS_BIGENDIAN */

#if defined(__i386__) || defined(__x86_64__)

/* Intel x86 */
#define X86_PPRO_OPT
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 retval; __asm__ ("bswap %0" : "=r" (retval) : "0" (*a) : "cc"); return retval;}
#ifdef X86_PPRO_OPT
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint32 retval; __asm__ ("movzwl %w1,%k0\n\tshll $16,%k0\n\tbswapl %k0\n" : "=&r" (retval) : "m" (*a) : "cc"); return retval;}
#else
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint32 retval; __asm__ ("xorl %k0,%k0\n\tmovw %w1,%w0\n\trolw $8,%w0" : "=&r" (retval) : "m" (*a) : "cc"); return retval;}
#endif
#define HAVE_GET_WORD_UNSWAPPED
#define do_get_mem_word_unswapped(a) ((uae_u32)*((uae_u16 *)(a)))
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
#ifdef X86_PPRO_OPT
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); *a = v;}
#else
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
#endif
#define HAVE_OPTIMIZED_BYTESWAP_32
/* bswap doesn't affect condition codes */
static inline uae_u32 do_byteswap_32(uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v)); return v;}
#define HAVE_OPTIMIZED_BYTESWAP_16
#ifdef X86_PPRO_OPT
static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); return v;}
#else
static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); return v;}
#endif

#elif defined(CPU_CAN_ACCESS_UNALIGNED)

/* Other little-endian CPUs which can do unaligned accesses */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 x = *a; return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint16 x = *a; return (x >> 8) | (x << 8);}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = (v >> 8) | (v << 8);}

#else /* CPU_CAN_ACCESS_UNALIGNED */

/* Other little-endian CPUs which can not do unaligned accesses (this needs optimization) */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}

#endif /* CPU_CAN_ACCESS_UNALIGNED */

#endif /* WORDS_BIGENDIAN */

#ifndef HAVE_OPTIMIZED_BYTESWAP_32
static inline uae_u32 do_byteswap_32(uae_u32 v)
    { return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v & 0xff) << 24) | ((v & 0xff00) << 8)); }
#endif

#ifndef HAVE_OPTIMIZED_BYTESWAP_16
static inline uae_u32 do_byteswap_16(uae_u32 v)
    { return (((v >> 8) & 0xff) | ((v & 0xff) << 8)); }
#endif
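
/*
 * Worked example: do_byteswap_32(0x12345678) yields 0x78563412 and
 * do_byteswap_16(0x1234) yields 0x3412, whichever of the generic or the
 * optimized x86 variants above ends up being compiled in.
 */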

#define do_get_mem_byte(a) ((uae_u32)*((uae_u8 *)(a)))
#define do_put_mem_byte(a, v) (*(uae_u8 *)(a) = (v))

#define call_mem_get_func(func, addr) ((*func)(addr))
#define call_mem_put_func(func, addr, v) ((*func)(addr, v))
#define __inline__ inline
#define CPU_EMU_SIZE 0
#undef NO_INLINE_MEMORY_ACCESS
#undef MD_HAVE_MEM_1_FUNCS
#define ENUMDECL typedef enum
#define ENUMNAME(name) name
#define write_log printf

#if defined(X86_ASSEMBLY) || defined(X86_64_ASSEMBLY)
#define ASM_SYM_FOR_FUNC(a) __asm__(a)
#else
#define ASM_SYM_FOR_FUNC(a)
#endif

#ifndef REGPARAM
# define REGPARAM
#endif
#define REGPARAM2

#endif