  1      /*
  2       * sysdeps.h - System dependent definitions for Linux
  3       *
  4  <    * SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
  4  >    * SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
  5       *
  6       * This program is free software; you can redistribute it and/or modify
  7       * it under the terms of the GNU General Public License as published by
139      **/
140
141     #if defined(__GNUC__)
142  <  #if defined(__x86_64__)
142  >  #if defined(__x86_64__) || defined(__i386__)
143     // Linux/AMD64 currently has no asm optimized bswap_32() in <byteswap.h>
144     #define opt_bswap_32 do_opt_bswap_32
145     static inline uint32 do_opt_bswap_32(uint32 x)
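
This hunk extends the optimized swap to 32-bit x86 as well; the body of
do_opt_bswap_32() falls outside the hunk (lines 146-183). On both targets the
classic asm-optimized form is a single BSWAP instruction. The sketch below is
my illustration of that pattern, not the file's code, and uses the standard
uint32_t rather than the file's uint32 typedef:

    #include <stdint.h>

    /* Reverse the four bytes of x; GCC picks a 32-bit register for the
       "+r" operand because x is 32 bits wide. */
    static inline uint32_t bswap32_asm(uint32_t x)
    {
        __asm__("bswap %0" : "+r" (x));
        return x;
    }
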
184          ((x & 0x000000ff) << 24) );
185     }
186
187  +  #if defined(__i386__)
188  +  #define opt_bswap_64 do_opt_bswap_64
189  +  static inline uint64 do_opt_bswap_64(uint64 x)
190  +  {
191  +      return (bswap_32(x >> 32) | (((uint64)bswap_32((uint32)x)) << 32));
192  +  }
193  +  #endif
194  +
195     #ifdef opt_bswap_64
196     #undef bswap_64
197     #define bswap_64 opt_bswap_64
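
The new do_opt_bswap_64() builds the 64-bit swap from two 32-bit swaps: each
half is byte-reversed and the halves are then exchanged. A standalone sanity
check of that identity (my sketch, calling <byteswap.h> directly rather than
the file's macros):

    #include <assert.h>
    #include <byteswap.h>
    #include <stdint.h>

    static inline uint64_t swap64_from_32(uint64_t x)
    {
        /* Byte-reverse each 32-bit half, then exchange the halves. */
        return (uint64_t)bswap_32((uint32_t)(x >> 32)) |
               ((uint64_t)bswap_32((uint32_t)x) << 32);
    }

    int main(void)
    {
        assert(swap64_from_32(0x0011223344556677ULL) == 0x7766554433221100ULL);
        return 0;
    }
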
225     // spin locks
226     #ifdef __GNUC__
227
228  <  #ifdef __powerpc__
228  >  #if defined(__powerpc__) || defined(__ppc__)
229     #define HAVE_TEST_AND_SET 1
230  <  static inline int testandset(int *p)
230  >  static inline int testandset(volatile int *p)
231     {
232         int ret;
233  <      __asm__ __volatile__("0: lwarx %0,0,%1 ;"
234  <                           " xor. %0,%3,%0;"
235  <                           " bne 1f;"
236  <                           " stwcx. %2,0,%1;"
237  <                           " bne- 0b;"
233  >      __asm__ __volatile__("0: lwarx %0,0,%1\n"
234  >                           " xor. %0,%3,%0\n"
235  >                           " bne 1f\n"
236  >                           " stwcx. %2,0,%1\n"
237  >                           " bne- 0b\n"
238                              "1: "
239                              : "=&r" (ret)
240                              : "r" (p), "r" (1), "r" (0)
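
Two changes recur from here on. First, every testandset() in the file now
takes a volatile int *, so the compiler must re-read the lock word from memory
instead of caching it. Second, the PowerPC template switches from ";" to "\n"
as the instruction separator: the adjacent string literals are concatenated
into a single template, and a newline is accepted as a statement separator by
every GAS target, whereas ";" is a comment delimiter on some PowerPC
assemblers, notably Apple's, which the newly added __ppc__ branch brings in
(that reading of the motivation is my inference). For comparison, later GCC
releases (4.1 and up) provide a builtin that expands to roughly the same
lwarx/stwcx. loop; this equivalent is my sketch, not something the file uses:

    /* Returns the previous value of *p: 0 means the flag was free and is
       now ours. Acquire-barrier semantics, per the __sync_lock_* contract. */
    static inline int testandset_sync(volatile int *p)
    {
        return __sync_lock_test_and_set(p, 1);
    }
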
245
246     #ifdef __i386__
247     #define HAVE_TEST_AND_SET 1
248  <  static inline int testandset(int *p)
248  >  static inline int testandset(volatile int *p)
249     {
250  <      char ret;
250  >      int ret;
251         long int readval;
252  <
253  <      __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
254  <                           : "=q" (ret), "=m" (*p), "=a" (readval)
255  <                           : "r" (1), "m" (*p), "a" (0)
252  >      /* Note: the "xchg" instruction does not need a "lock" prefix */
253  >      __asm__ __volatile__("xchgl %0, %1"
254  >                           : "=r" (ret), "=m" (*p), "=a" (readval)
255  >                           : "0" (1), "m" (*p)
256                              : "memory");
257         return ret;
258     }
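
The x86 rewrite is more than cleanup. An xchg that touches memory asserts the
processor's bus lock implicitly, so a single xchgl is atomic without the
"lock" prefix, and its result is the previous value of *p: 0 exactly when the
flag was free. The old "lock; cmpxchgl; sete" sequence instead set ret when
the compare-exchange succeeded, i.e. it returned 1 on acquisition, inverting
the return-0-on-success convention the other testandset() variants rely on
(that this was the bug being fixed is my inference from the code). ret also
widens from char to int to match xchgl's 32-bit operand. C11 later
standardized exactly this operation; the equivalent below is my sketch, not
code from the file:

    #include <stdatomic.h>

    static atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    /* Returns non-zero if the flag was already set; on x86 this compiles
       to the same xchg-based test-and-set. */
    static inline int testandset_c11(void)
    {
        return atomic_flag_test_and_set_explicit(&lock_flag,
                                                 memory_order_acquire);
    }
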
260
261     #ifdef __s390__
262     #define HAVE_TEST_AND_SET 1
263  <  static inline int testandset(int *p)
263  >  static inline int testandset(volatile int *p)
264     {
265         int ret;
266
275
276     #ifdef __alpha__
277     #define HAVE_TEST_AND_SET 1
278  <  static inline int testandset(int *p)
278  >  static inline int testandset(volatile int *p)
279     {
280         int ret;
281         unsigned long one;
295
296     #ifdef __sparc__
297     #define HAVE_TEST_AND_SET 1
298  <  static inline int testandset(int *p)
298  >  static inline int testandset(volatile int *p)
299     {
300         int ret;
301
310
311     #ifdef __arm__
312     #define HAVE_TEST_AND_SET 1
313  <  static inline int testandset(int *p)
313  >  static inline int testandset(volatile int *p)
314     {
315         register unsigned int ret;
316         __asm__ __volatile__("swp %0, %1, [%2]"
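
The ARM variant relies on swp, a one-instruction atomic swap that returns the
previous value. As an aside beyond this 2004-era code: ARMv6 deprecated swp
in favor of exclusive load/store pairs, so a port to newer cores would look
roughly like the following sketch of mine rather than the swp form:

    static inline int testandset_armv6(volatile int *p)
    {
        int ret, tmp;
        __asm__ __volatile__("0: ldrex %0, [%2]\n"
                             "   strex %1, %3, [%2]\n"  /* tmp = 0 on success */
                             "   cmp   %1, #0\n"
                             "   bne   0b"              /* lost exclusivity: retry */
                             : "=&r" (ret), "=&r" (tmp)
                             : "r" (p), "r" (1)
                             : "cc", "memory");
        return ret;  /* previous value: 0 means the flag was free */
    }
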
325
326     #if HAVE_TEST_AND_SET
327     #define HAVE_SPINLOCKS 1
328  <  typedef int spinlock_t;
328  >  typedef volatile int spinlock_t;
329
330     static const spinlock_t SPIN_LOCK_UNLOCKED = 0;
331
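
Making spinlock_t volatile matches the new testandset(volatile int *)
signature, so a spinlock_t * passes straight through, and it keeps the
compiler from caching the lock word across iterations of a spin loop. The
spin_lock/spin_unlock wrappers themselves fall outside the hunks shown; the
usual pattern they follow is sketched below (my reconstruction, assuming the
testandset() and SPIN_LOCK_UNLOCKED definitions above; the lock name in the
usage note is hypothetical):

    static inline void spin_lock(spinlock_t *lock)
    {
        while (testandset(lock))
            ;  /* busy-wait until the holder stores 0 */
    }

    static inline void spin_unlock(spinlock_t *lock)
    {
        *lock = 0;  /* plain volatile store; a weakly ordered CPU would
                       also want a release barrier here */
    }

    /* Usage:
       static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
       spin_lock(&my_lock);  ...critical section...  spin_unlock(&my_lock); */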