/*
 * sysdeps.h - System dependent definitions for Unix
 *
 * Basilisk II (C) 1997-2005 Christian Bauer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef SYSDEPS_H
#define SYSDEPS_H

#ifndef __STDC__
#error "Your compiler is not ANSI. Get a real one."
#endif

#include "config.h"
#include "user_strings_unix.h"

#ifndef STDC_HEADERS
#error "You don't have ANSI C header files."
#endif

#ifdef HAVE_UNISTD_H
# include <sys/types.h>
# include <unistd.h>
#endif

#include <netinet/in.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef HAVE_PTHREADS
# include <pthread.h>
#endif

#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif

#ifdef TIME_WITH_SYS_TIME
# include <sys/time.h>
# include <time.h>
#else
# ifdef HAVE_SYS_TIME_H
#  include <sys/time.h>
# else
#  include <time.h>
# endif
#endif


#ifdef ENABLE_NATIVE_M68K

/* Mac and host address space are the same */
#define REAL_ADDRESSING 1

/* Using 68k natively */
#define EMULATED_68K 0

/* Mac ROM is not write protected */
#define ROM_IS_WRITE_PROTECTED 0
#define USE_SCRATCHMEM_SUBTERFUGE 1

#else

/* Mac and host address space are distinct */
#ifndef REAL_ADDRESSING
#define REAL_ADDRESSING 0
#endif

/* Using 68k emulator */
#define EMULATED_68K 1

/* Does the m68k emulator use a prefetch buffer? */
#define USE_PREFETCH_BUFFER 0

/* Mac ROM is write protected when banked memory is used */
#if REAL_ADDRESSING || DIRECT_ADDRESSING
# define ROM_IS_WRITE_PROTECTED 0
# define USE_SCRATCHMEM_SUBTERFUGE 1
#else
# define ROM_IS_WRITE_PROTECTED 1
#endif

#endif
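
/* Addressing model overview, for orientation: with REAL_ADDRESSING the
 * emulated Mac uses host addresses directly, with DIRECT_ADDRESSING Mac
 * addresses are presumably translated to host addresses at a constant
 * offset, and otherwise all accesses go through banked memory functions,
 * which is why the ROM can only be write protected in the banked case. */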

/* Direct Addressing requires Video on SEGV signals (VOSF) in plain X11 mode */
#if DIRECT_ADDRESSING && (!ENABLE_VOSF && !USE_SDL_VIDEO)
# undef ENABLE_VOSF
# define ENABLE_VOSF 1
#endif

/* ExtFS is supported */
#define SUPPORTS_EXTFS 1

/* BSD socket API supported */
#define SUPPORTS_UDP_TUNNEL 1

/* Use the CPU emulator to check for periodic tasks? */
#ifdef HAVE_PTHREADS
#define USE_PTHREADS_SERVICES
#endif
#if EMULATED_68K
#if defined(__NetBSD__)
#define USE_CPU_EMUL_SERVICES
#endif
#endif
#ifdef USE_CPU_EMUL_SERVICES
#undef USE_PTHREADS_SERVICES
#endif


/* Data types */
typedef unsigned char uint8;
typedef signed char int8;
#if SIZEOF_SHORT == 2
typedef unsigned short uint16;
typedef short int16;
#elif SIZEOF_INT == 2
typedef unsigned int uint16;
typedef int int16;
#else
#error "No 2 byte type, you lose."
#endif
#if SIZEOF_INT == 4
typedef unsigned int uint32;
typedef int int32;
#elif SIZEOF_LONG == 4
typedef unsigned long uint32;
typedef long int32;
#else
#error "No 4 byte type, you lose."
#endif
#if SIZEOF_LONG == 8
typedef unsigned long uint64;
typedef long int64;
#define VAL64(a) (a ## l)
#define UVAL64(a) (a ## ul)
#elif SIZEOF_LONG_LONG == 8
typedef unsigned long long uint64;
typedef long long int64;
#define VAL64(a) (a ## LL)
#define UVAL64(a) (a ## uLL)
#else
#error "No 8 byte type, you lose."
#endif
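/* VAL64/UVAL64 attach the literal suffix that matches whichever 64-bit type
 * was chosen above, e.g. UVAL64(0x100000000) expands to 0x100000000ul or
 * 0x100000000uLL as appropriate. */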
#if SIZEOF_VOID_P == 4
typedef uint32 uintptr;
typedef int32 intptr;
#elif SIZEOF_VOID_P == 8
typedef uint64 uintptr;
typedef int64 intptr;
#else
#error "Unsupported size of pointer"
#endif

#ifndef HAVE_LOFF_T
typedef off_t loff_t;
#endif
#ifndef HAVE_CADDR_T
typedef char * caddr_t;
#endif

/* Time data type for Time Manager emulation */
#ifdef HAVE_CLOCK_GETTIME
typedef struct timespec tm_time_t;
#else
typedef struct timeval tm_time_t;
#endif
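/* Note that struct timespec carries nanoseconds (tv_nsec) while struct
 * timeval carries microseconds (tv_usec), so code using tm_time_t has to
 * match whichever representation was selected here. */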

/* Define codes for all the float formats that we know of.
 * Though we only handle IEEE format. */
#define UNKNOWN_FLOAT_FORMAT 0
#define IEEE_FLOAT_FORMAT 1
#define VAX_FLOAT_FORMAT 2
#define IBM_FLOAT_FORMAT 3
#define C4X_FLOAT_FORMAT 4

/* UAE CPU data types */
#define uae_s8 int8
#define uae_u8 uint8
#define uae_s16 int16
#define uae_u16 uint16
#define uae_s32 int32
#define uae_u32 uint32
#define uae_s64 int64
#define uae_u64 uint64
typedef uae_u32 uaecptr;
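/* A uaecptr holds an address in the emulated 68k address space (hence always
 * 32 bits), not a host pointer. */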

/* Alignment restrictions */
#if defined(__i386__) || defined(__powerpc__) || defined(__m68k__) || defined(__x86_64__)
# define CPU_CAN_ACCESS_UNALIGNED
#endif

/* Timing functions */
extern uint64 GetTicks_usec(void);
extern void Delay_usec(uint32 usec);
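/* Illustrative use (the names below are just placeholders): GetTicks_usec()
 * returns a microsecond tick count, so an interval can be paced as
 *
 *   uint64 start = GetTicks_usec();
 *   do_work();
 *   uint64 elapsed = GetTicks_usec() - start;
 *   if (elapsed < frame_usec)
 *       Delay_usec(frame_usec - elapsed);
 */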

/* Spinlocks */
#ifdef __GNUC__
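/* Each architecture-specific testandset() below atomically marks *p as
 * locked and returns its previous state: zero means the lock was free and
 * has now been taken, non-zero means it was already held. spin_lock()
 * further down simply retries until it returns zero. */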

#if defined(__powerpc__) || defined(__ppc__)
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;
    __asm__ __volatile__("0:    lwarx   %0,0,%1\n"
                         "      xor.    %0,%3,%0\n"
                         "      bne     1f\n"
                         "      stwcx.  %2,0,%1\n"
                         "      bne-    0b\n"
                         "1:    "
                         : "=&r" (ret)
                         : "r" (p), "r" (1), "r" (0)
                         : "cr0", "memory");
    return ret;
}
#endif

/* FIXME: SheepShaver occasionally hangs with these locks */
#if 0 && (defined(__i386__) || defined(__x86_64__))
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    long int ret;
    /* Note: the "xchg" instruction does not need a "lock" prefix */
    __asm__ __volatile__("xchgl %k0, %1"
                         : "=r" (ret), "=m" (*p)
                         : "0" (1), "m" (*p)
                         : "memory");
    return ret;
}
#endif

#ifdef __s390__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;

    __asm__ __volatile__("0: cs    %0,%1,0(%2)\n"
                         "   jl    0b"
                         : "=&d" (ret)
                         : "r" (1), "a" (p), "0" (*p)
                         : "cc", "memory");
    return ret;
}
#endif

#ifdef __alpha__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__("0: mov 1,%2\n"
                         "   ldl_l %0,%1\n"
                         "   stl_c %2,%1\n"
                         "   beq %2,1f\n"
                         ".subsection 2\n"
                         "1: br 0b\n"
                         ".previous"
                         : "=r" (ret), "=m" (*p), "=r" (one)
                         : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
#define HAVE_TEST_AND_SET 1
static inline int testandset(volatile int *p)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(p));

    return ret;
}
#endif

#endif /* __GNUC__ */

typedef volatile int spinlock_t;

static const spinlock_t SPIN_LOCK_UNLOCKED = 0;

#if HAVE_TEST_AND_SET
#define HAVE_SPINLOCKS 1
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
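
/* Typical use, with a hypothetical lock protecting shared state:
 *
 *   static spinlock_t frame_lock = SPIN_LOCK_UNLOCKED;
 *
 *   spin_lock(&frame_lock);
 *   update_shared_frame_state();   // placeholder for the protected work
 *   spin_unlock(&frame_lock);
 *
 * Note that when no testandset() is available, the fallback versions above
 * perform no locking at all. */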

/* X11 display fast locks */
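/* These macros serialize access to the X11 display connection, which may be
 * shared between threads (e.g. video refresh and the emulator). They compile
 * down to the spinlocks above when available, to a pthread mutex otherwise,
 * or to nothing in single-threaded builds. */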
#ifdef HAVE_SPINLOCKS
#define X11_LOCK_TYPE spinlock_t
#define X11_LOCK_INIT SPIN_LOCK_UNLOCKED
#define XDisplayLock() spin_lock(&x_display_lock)
#define XDisplayUnlock() spin_unlock(&x_display_lock)
#elif defined(HAVE_PTHREADS)
#define X11_LOCK_TYPE pthread_mutex_t
#define X11_LOCK_INIT PTHREAD_MUTEX_INITIALIZER
#define XDisplayLock() pthread_mutex_lock(&x_display_lock);
#define XDisplayUnlock() pthread_mutex_unlock(&x_display_lock);
#else
#define XDisplayLock()
#define XDisplayUnlock()
#endif
#ifdef X11_LOCK_TYPE
extern X11_LOCK_TYPE x_display_lock;
#endif

#ifdef HAVE_PTHREADS
/* Centralized pthread attribute setup */
void Set_pthread_attr(pthread_attr_t *attr, int priority);
#endif

/* UAE CPU defines */
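/* The do_get_mem_*()/do_put_mem_*() helpers below access Mac memory in the
 * 68k's big-endian byte order. On big-endian hosts they are plain loads and
 * stores; on little-endian hosts they byte-swap, and hosts that cannot do
 * unaligned accesses get byte-wise variants. */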
#ifdef WORDS_BIGENDIAN

#ifdef CPU_CAN_ACCESS_UNALIGNED

/* Big-endian CPUs which can do unaligned accesses */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {return *a;}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {return *a;}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = v;}

#else /* CPU_CAN_ACCESS_UNALIGNED */

#ifdef sgi
/* The SGI MIPSPro compilers can do unaligned accesses given enough hints.
 * They will automatically inline these routines. */
#ifdef __cplusplus
extern "C" { /* only the C compiler does unaligned accesses */
#endif
extern uae_u32 do_get_mem_long(uae_u32 *a);
extern uae_u32 do_get_mem_word(uae_u16 *a);
extern void do_put_mem_long(uae_u32 *a, uae_u32 v);
extern void do_put_mem_word(uae_u16 *a, uae_u32 v);
#ifdef __cplusplus
}
#endif

#else /* sgi */

/* Big-endian CPUs which can not do unaligned accesses (this is not the most efficient way to do this...) */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}
#endif /* sgi */

#endif /* CPU_CAN_ACCESS_UNALIGNED */

#else /* WORDS_BIGENDIAN */

#if defined(__i386__) || defined(__x86_64__)

/* Intel x86 */
#define X86_PPRO_OPT
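/* With X86_PPRO_OPT the 16-bit swaps below use a 32-bit shift plus bswap
 * instead of rolw; the intent, presumably, is to avoid the partial-register
 * stalls that 16-bit rotates cause on P6-class CPUs. */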
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 retval; __asm__ ("bswap %0" : "=r" (retval) : "0" (*a) : "cc"); return retval;}
#ifdef X86_PPRO_OPT
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint32 retval; __asm__ ("movzwl %w1,%k0\n\tshll $16,%k0\n\tbswapl %k0\n" : "=&r" (retval) : "m" (*a) : "cc"); return retval;}
#else
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint32 retval; __asm__ ("xorl %k0,%k0\n\tmovw %w1,%w0\n\trolw $8,%w0" : "=&r" (retval) : "m" (*a) : "cc"); return retval;}
#endif
#define HAVE_GET_WORD_UNSWAPPED
#define do_get_mem_word_unswapped(a) ((uae_u32)*((uae_u16 *)(a)))
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
#ifdef X86_PPRO_OPT
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); *a = v;}
#else
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); *a = v;}
#endif
#define HAVE_OPTIMIZED_BYTESWAP_32
/* bswap doesn't affect condition codes */
static inline uae_u32 do_byteswap_32(uae_u32 v) {__asm__ ("bswap %0" : "=r" (v) : "0" (v)); return v;}
#define HAVE_OPTIMIZED_BYTESWAP_16
#ifdef X86_PPRO_OPT
static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("bswapl %0" : "=&r" (v) : "0" (v << 16) : "cc"); return v;}
#else
static inline uae_u32 do_byteswap_16(uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); return v;}
#endif

#elif defined(CPU_CAN_ACCESS_UNALIGNED)

/* Other little-endian CPUs which can do unaligned accesses */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 x = *a; return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint16 x = *a; return (x >> 8) | (x << 8);}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = (v >> 24) | ((v >> 8) & 0xff00) | ((v << 8) & 0xff0000) | (v << 24);}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = (v >> 8) | (v << 8);}

#else /* CPU_CAN_ACCESS_UNALIGNED */

/* Other little-endian CPUs which can not do unaligned accesses (this needs optimization) */
static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}

#endif /* CPU_CAN_ACCESS_UNALIGNED */

#endif /* WORDS_BIGENDIAN */

#ifndef HAVE_OPTIMIZED_BYTESWAP_32
static inline uae_u32 do_byteswap_32(uae_u32 v)
    { return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v & 0xff) << 24) | ((v & 0xff00) << 8)); }
#endif

#ifndef HAVE_OPTIMIZED_BYTESWAP_16
static inline uae_u32 do_byteswap_16(uae_u32 v)
    { return (((v >> 8) & 0xff) | ((v & 0xff) << 8)); }
#endif

#define do_get_mem_byte(a) ((uae_u32)*((uae_u8 *)(a)))
#define do_put_mem_byte(a, v) (*(uae_u8 *)(a) = (v))

#define call_mem_get_func(func, addr) ((*func)(addr))
#define call_mem_put_func(func, addr, v) ((*func)(addr, v))
#define __inline__ inline
#define CPU_EMU_SIZE 0
#undef NO_INLINE_MEMORY_ACCESS
#undef MD_HAVE_MEM_1_FUNCS
#define ENUMDECL typedef enum
#define ENUMNAME(name) name
#define write_log printf

#if defined(X86_ASSEMBLY) || defined(X86_64_ASSEMBLY)
#define ASM_SYM_FOR_FUNC(a) __asm__(a)
#else
#define ASM_SYM_FOR_FUNC(a)
#endif
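
/* REGPARAM/REGPARAM2 appear to be calling-convention annotations for the UAE
 * CPU core; unless a port defines REGPARAM itself, both expand to nothing
 * here. */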

#ifndef REGPARAM
# define REGPARAM
#endif
#define REGPARAM2

#endif