  1    /*
  2     * sysdeps.h - System dependent definitions for Windows
  3     *
  4 <   * Basilisk II (C) 1997-2005 Christian Bauer
  4 >   * Basilisk II (C) 1997-2008 Christian Bauer
  5     *
  6     * This program is free software; you can redistribute it and/or modify
  7     * it under the terms of the GNU General Public License as published by
152    #define uae_u64 uint64
153    typedef uae_u32 uaecptr;
154
155 -  /* Alignment restrictions */
156 -  #if defined(__i386__) || defined(__powerpc__) || defined(__m68k__) || defined(__x86_64__)
157 -  # define CPU_CAN_ACCESS_UNALIGNED
158 -  #endif
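The removed block above is what used to define CPU_CAN_ACCESS_UNALIGNED, the switch that the memory accessors further down key off to choose between direct dereferences and byte-wise access. For reference only, a portable way to read a possibly misaligned 32-bit value without such a switch is a memcpy-based helper, which compilers generally lower to a single load on CPUs that permit unaligned access. The helper name below is invented for illustration and is not part of sysdeps.h.

    /* Illustrative sketch, not part of sysdeps.h: alignment-safe 32-bit load. */
    #include <stdint.h>
    #include <string.h>

    static inline uint32_t load_u32_any_alignment(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof v);  /* usually compiled to one plain load where legal */
        return v;
    }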
159 -
155    /* Timing functions */
156    extern void timer_init(void);
157    extern uint64 GetTicks_usec(void);
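timer_init() and GetTicks_usec() are only declared here; their definitions live elsewhere in the Windows port. As a point of reference, a microsecond tick source on Windows is typically built on QueryPerformanceCounter(), roughly as in the sketch below. This is an assumption about a typical implementation, not the code Basilisk II actually ships, and the names carry an example_ prefix to make that clear.

    /* Hedged sketch of a QueryPerformanceCounter-based microsecond clock;
       not taken from the Basilisk II sources. */
    #include <windows.h>
    #include <stdint.h>

    static LARGE_INTEGER qpc_freq;           /* counts per second */

    static void example_timer_init(void)
    {
        QueryPerformanceFrequency(&qpc_freq);
    }

    static uint64_t example_GetTicks_usec(void)
    {
        LARGE_INTEGER now;
        QueryPerformanceCounter(&now);
        return (uint64_t)now.QuadPart * 1000000u / (uint64_t)qpc_freq.QuadPart;
    }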
159
160    /* Spinlocks */
161    #ifdef __GNUC__
167 -
168 -  #if defined(__powerpc__) || defined(__ppc__)
169 -  #define HAVE_TEST_AND_SET 1
170 -  static inline int testandset(volatile int *p)
171 -  {
172 -      int ret;
173 -      __asm__ __volatile__("0: lwarx %0,0,%1\n"
174 -                           " xor. %0,%3,%0\n"
175 -                           " bne 1f\n"
176 -                           " stwcx. %2,0,%1\n"
177 -                           " bne- 0b\n"
178 -                           "1: "
179 -                           : "=&r" (ret)
180 -                           : "r" (p), "r" (1), "r" (0)
181 -                           : "cr0", "memory");
182 -      return ret;
183 -  }
184 -  #endif
185 -
186 -  #if defined(__i386__) || defined(__x86_64__)
162    #define HAVE_TEST_AND_SET 1
163    static inline int testandset(volatile int *p)
164    {
170                            : "memory");
171        return ret;
172    }
198 -  #endif
199 -
200 -  #ifdef __alpha__
201 -  #define HAVE_TEST_AND_SET 1
202 -  static inline int testandset(volatile int *p)
203 -  {
204 -      int ret;
205 -      unsigned long one;
206 -
207 -      __asm__ __volatile__("0: mov 1,%2\n"
208 -                           " ldl_l %0,%1\n"
209 -                           " stl_c %2,%1\n"
210 -                           " beq %2,1f\n"
211 -                           ".subsection 2\n"
212 -                           "1: br 0b\n"
213 -                           ".previous"
214 -                           : "=r" (ret), "=m" (*p), "=r" (one)
215 -                           : "m" (*p));
216 -      return ret;
217 -  }
218 -  #endif
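The PowerPC (lwarx/stwcx.) and Alpha (ldl_l/stl_c) blocks deleted above hand-code an atomic test-and-set. For readers who want the same primitive without per-architecture assembly, GCC and Clang expose it through the __sync builtins; the sketch below is an equivalent expressed that way, with invented names, and is not what this header does.

    /* Sketch only: builtin equivalent of the removed assembly testandset().
       Returns the previous value, i.e. non-zero if the flag was already set. */
    static inline int testandset_via_builtin(volatile int *p)
    {
        return __sync_lock_test_and_set(p, 1);   /* acquire semantics */
    }

    static inline void testandset_release(volatile int *p)
    {
        __sync_lock_release(p);                  /* stores 0 with release semantics */
    }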
219 -
173    #endif /* __GNUC__ */
174
175    typedef volatile int spinlock_t;
207    }
208    #endif
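The hunk above jumps from the spinlock_t typedef (line 175) to a closing brace and #endif (lines 207-208), so the lock/unlock bodies themselves are not reproduced here. A testandset()-based spinlock layer of this kind usually looks like the sketch below; treat it as the typical shape under that assumption (and under HAVE_TEST_AND_SET), not as a verbatim copy of sysdeps.h. The example_ names are invented.

    /* Typical testandset()-based spinlock helpers (illustrative names only,
       assuming HAVE_TEST_AND_SET and the definitions above). */
    static inline void example_spin_lock(spinlock_t *lock)
    {
        while (testandset(lock))
            ;                       /* busy-wait until the previous value was 0 */
    }

    static inline void example_spin_unlock(spinlock_t *lock)
    {
        *lock = 0;                  /* release the lock with a plain store */
    }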
209
257 -  /* UAE CPU defines */
258 -  #ifdef WORDS_BIGENDIAN
259 -
260 -  #ifdef CPU_CAN_ACCESS_UNALIGNED
261 -
262 -  /* Big-endian CPUs which can do unaligned accesses */
263 -  static inline uae_u32 do_get_mem_long(uae_u32 *a) {return *a;}
264 -  static inline uae_u32 do_get_mem_word(uae_u16 *a) {return *a;}
265 -  static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = v;}
266 -  static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = v;}
267 -
268 -  #else /* CPU_CAN_ACCESS_UNALIGNED */
269 -
270 -  /* Big-endian CPUs which can not do unaligned accesses (this is not the most efficient way to do this...) */
271 -  static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
272 -  static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
273 -  static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
274 -  static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}
275 -
276 -  #endif /* CPU_CAN_ACCESS_UNALIGNED */
277 -
278 -  #else /* WORDS_BIGENDIAN */
279 -
280 -  #if defined(__i386__) || defined(__x86_64__)
281 -
210    /* Intel x86 */
211    #define X86_PPRO_OPT
212    static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 retval; __asm__ ("bswap %0" : "=r" (retval) : "0" (*a) : "cc"); return retval;}
233    static inline uae_u32 do_byteswap_16_g(uae_u32 v) {__asm__ ("rolw $8,%0" : "=r" (v) : "0" (v) : "cc"); return v;}
234    #endif
235
308 -  #elif defined(CPU_CAN_ACCESS_UNALIGNED)
309 -
310 -  /* Other little-endian CPUs which can do unaligned accesses */
311 -  static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint32 x = *a; return (x >> 24) | (x >> 8) & 0xff00 | (x << 8) & 0xff0000 | (x << 24);}
312 -  static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint16 x = *a; return (x >> 8) | (x << 8);}
313 -  static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {*a = (v >> 24) | (v >> 8) & 0xff00 | (v << 8) & 0xff0000 | (v << 24);}
314 -  static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {*a = (v >> 8) | (v << 8);}
315 -
316 -  #else /* CPU_CAN_ACCESS_UNALIGNED */
317 -
318 -  /* Other little-endian CPUs which can not do unaligned accesses (this needs optimization) */
319 -  static inline uae_u32 do_get_mem_long(uae_u32 *a) {uint8 *b = (uint8 *)a; return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];}
320 -  static inline uae_u32 do_get_mem_word(uae_u16 *a) {uint8 *b = (uint8 *)a; return (b[0] << 8) | b[1];}
321 -  static inline void do_put_mem_long(uae_u32 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;}
322 -  static inline void do_put_mem_word(uae_u16 *a, uae_u32 v) {uint8 *b = (uint8 *)a; b[0] = v >> 8; b[1] = v;}
323 -
324 -  #endif /* CPU_CAN_ACCESS_UNALIGNED */
325 -
326 -  #endif /* WORDS_BIGENDIAN */
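All of the removed do_get_mem_*/do_put_mem_* variants implement the same contract: emulated 68k memory is kept in big-endian byte order, and the byte-wise fallback satisfies that regardless of host endianness or alignment. The standalone check below illustrates the property using plain C99 types instead of the uae_* typedefs; the names are invented for the example.

    /* Standalone illustration: byte-wise big-endian put/get round-trips a value
       and lays the bytes out MSB first on any host. Not part of sysdeps.h. */
    #include <assert.h>
    #include <stdint.h>

    static void put_be32(uint8_t *b, uint32_t v)
    {
        b[0] = v >> 24; b[1] = v >> 16; b[2] = v >> 8; b[3] = v;
    }

    static uint32_t get_be32(const uint8_t *b)
    {
        return ((uint32_t)b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
    }

    int main(void)
    {
        uint8_t buf[4];
        put_be32(buf, 0x12345678u);
        assert(buf[0] == 0x12 && buf[3] == 0x78);   /* MSB stored first */
        assert(get_be32(buf) == 0x12345678u);       /* round trip */
        return 0;
    }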
327 -
236    #ifndef HAVE_OPTIMIZED_BYTESWAP_32
237    static inline uae_u32 do_byteswap_32_g(uae_u32 v)
238        { return (((v >> 24) & 0xff) | ((v >> 8) & 0xff00) | ((v & 0xff) << 24) | ((v & 0xff00) << 8)); }
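do_byteswap_32_g() is the generic fallback for reversing all four bytes of a 32-bit word; on x86 the optimized path above gets the same effect from a single bswap instruction. The small standalone check below compares the formula against GCC/Clang's __builtin_bswap32 and is illustrative only, not part of the header.

    /* Standalone check that the generic swap formula reverses byte order and
       agrees with the compiler builtin (GCC/Clang). */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t byteswap_32_generic(uint32_t v)
    {
        return ((v >> 24) & 0xff) | ((v >> 8) & 0xff00)
             | ((v & 0xff) << 24) | ((v & 0xff00) << 8);
    }

    int main(void)
    {
        uint32_t x = 0x11223344u;
        assert(byteswap_32_generic(x) == 0x44332211u);
        assert(byteswap_32_generic(x) == __builtin_bswap32(x));
        return 0;
    }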