2 |
|
* sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't |
3 |
|
* like nonstandard stacks) |
4 |
|
* |
5 |
< |
* SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig |
5 |
> |
* SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig |
6 |
|
* |
7 |
|
* This program is free software; you can redistribute it and/or modify |
8 |
|
* it under the terms of the GNU General Public License as published by |
33 |
|
#include <errno.h> |
34 |
|
#include <unistd.h> |
35 |
|
#include <signal.h> |
36 |
- |
#include <sched.h> |
36 |
|
#include <pthread.h> |
38 |
- |
#include <semaphore.h> |
37 |
|
|
38 |
|
|
39 |
|
/* Thread stack size */ |
49 |
|
extern int __clone(int (*fn)(void *), void *, int, void *); |
50 |
|
|
51 |
|
/* struct sem_t */ |
52 |
+ |
typedef struct { |
53 |
+ |
struct _pthread_fastlock __sem_lock; |
54 |
+ |
int __sem_value; |
55 |
+ |
_pthread_descr __sem_waiting; |
56 |
+ |
} sem_t; |
57 |
+ |
|
58 |
+ |
#define SEM_VALUE_MAX 64 |
59 |
|
#define status __status |
60 |
|
#define spinlock __spinlock |
61 |
|
#define sem_lock __sem_lock |
180 |
|
* Spinlocks |
181 |
|
*/ |
182 |
|
|
183 |
< |
static int try_acquire_spinlock(int *lock) |
183 |
> |
/* For multiprocessor systems, we want to ensure all memory accesses |
184 |
> |
are completed before we reset a lock. On other systems, we still |
185 |
> |
need to make sure that the compiler has flushed everything to memory. */ |
186 |
> |
#define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory") |
187 |
> |
|
188 |
> |
static void fastlock_init(struct _pthread_fastlock *lock) |
189 |
|
{ |
190 |
< |
return test_and_set(lock, 1) == 0; |
190 |
> |
lock->status = 0; |
191 |
> |
lock->spinlock = 0; |
192 |
|
} |
193 |
|
|
194 |
< |
static void acquire_spinlock(volatile int *lock) |
194 |
> |
static int fastlock_try_acquire(struct _pthread_fastlock *lock) |
195 |
|
{ |
196 |
< |
do { |
197 |
< |
while (*lock) ; |
198 |
< |
} while (test_and_set((int *)lock, 1) != 0); |
196 |
> |
int res = EBUSY; |
197 |
> |
if (test_and_set(&lock->spinlock, 1) == 0) { |
198 |
> |
if (lock->status == 0) { |
199 |
> |
lock->status = 1; |
200 |
> |
MEMORY_BARRIER(); |
201 |
> |
res = 0; |
202 |
> |
} |
203 |
> |
lock->spinlock = 0; |
204 |
> |
} |
205 |
> |
return res; |
206 |
> |
} |
207 |
> |
|
208 |
> |
static void fastlock_acquire(struct _pthread_fastlock *lock) |
209 |
> |
{ |
210 |
> |
MEMORY_BARRIER(); |
211 |
> |
while (test_and_set(&lock->spinlock, 1)) |
212 |
> |
usleep(0); |
213 |
|
} |
214 |
|
|
215 |
< |
static void release_spinlock(int *lock) |
215 |
> |
static void fastlock_release(struct _pthread_fastlock *lock) |
216 |
|
{ |
217 |
< |
*lock = 0; |
217 |
> |
MEMORY_BARRIER(); |
218 |
> |
lock->spinlock = 0; |
219 |
> |
__asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock)); |
220 |
|
} |
221 |
|
|
222 |
|
|
226 |
|
|
227 |
|
int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) |
228 |
|
{ |
229 |
< |
// pthread_init_lock |
203 |
< |
mutex->__m_lock.__status = 0; |
204 |
< |
mutex->__m_lock.__spinlock = 0; |
205 |
< |
|
229 |
> |
fastlock_init(&mutex->__m_lock); |
230 |
|
mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP; |
231 |
|
mutex->__m_count = 0; |
232 |
|
mutex->__m_owner = NULL; |
257 |
|
{ |
258 |
|
switch (mutex->__m_kind) { |
259 |
|
case PTHREAD_MUTEX_TIMED_NP: |
260 |
< |
acquire_spinlock(&mutex->__m_lock.__spinlock); |
260 |
> |
fastlock_acquire(&mutex->__m_lock); |
261 |
|
return 0; |
262 |
|
default: |
263 |
|
return EINVAL; |
273 |
|
{ |
274 |
|
switch (mutex->__m_kind) { |
275 |
|
case PTHREAD_MUTEX_TIMED_NP: |
276 |
< |
if (!try_acquire_spinlock(&mutex->__m_lock.__spinlock)) |
253 |
< |
return EBUSY; |
254 |
< |
return 0; |
276 |
> |
return fastlock_try_acquire(&mutex->__m_lock); |
277 |
|
default: |
278 |
|
return EINVAL; |
279 |
|
} |
288 |
|
{ |
289 |
|
switch (mutex->__m_kind) { |
290 |
|
case PTHREAD_MUTEX_TIMED_NP: |
291 |
< |
release_spinlock(&mutex->__m_lock.__spinlock); |
291 |
> |
fastlock_release(&mutex->__m_lock); |
292 |
|
return 0; |
293 |
|
default: |
294 |
|
return EINVAL; |
323 |
|
|
324 |
|
int sem_init(sem_t *sem, int pshared, unsigned int value) |
325 |
|
{ |
326 |
< |
sem->sem_lock.status = 0; |
327 |
< |
sem->sem_lock.spinlock = 0; |
326 |
> |
if (sem == NULL || value > SEM_VALUE_MAX) { |
327 |
> |
errno = EINVAL; |
328 |
> |
return -1; |
329 |
> |
} |
330 |
> |
if (pshared) { |
331 |
> |
errno = ENOSYS; |
332 |
> |
return -1; |
333 |
> |
} |
334 |
> |
fastlock_init(&sem->sem_lock); |
335 |
|
sem->sem_value = value; |
336 |
|
sem->sem_waiting = NULL; |
337 |
|
return 0; |
344 |
|
|
345 |
|
int sem_destroy(sem_t *sem) |
346 |
|
{ |
347 |
+ |
if (sem == NULL) { |
348 |
+ |
errno = EINVAL; |
349 |
+ |
return -1; |
350 |
+ |
} |
351 |
+ |
if (sem->sem_waiting) { |
352 |
+ |
errno = EBUSY; |
353 |
+ |
return -1; |
354 |
+ |
} |
355 |
+ |
sem->sem_value = 0; |
356 |
+ |
sem->sem_waiting = NULL; |
357 |
|
return 0; |
358 |
|
} |
359 |
|
|
362 |
|
* Wait on semaphore |
363 |
|
*/ |
364 |
|
|
326 |
- |
void null_handler(int sig) |
327 |
- |
{ |
328 |
- |
} |
329 |
- |
|
365 |
|
int sem_wait(sem_t *sem) |
366 |
|
{ |
367 |
< |
acquire_spinlock(&sem->sem_lock.spinlock); |
368 |
< |
if (sem->sem_value > 0) |
369 |
< |
atomic_add((int *)&sem->sem_value, -1); |
370 |
< |
else { |
371 |
< |
sigset_t mask; |
372 |
< |
if (!sem->sem_lock.status) { |
373 |
< |
struct sigaction sa; |
374 |
< |
sem->sem_lock.status = SIGUSR2; |
375 |
< |
sa.sa_handler = null_handler; |
376 |
< |
sa.sa_flags = SA_RESTART; |
377 |
< |
sigemptyset(&sa.sa_mask); |
378 |
< |
sigaction(sem->sem_lock.status, &sa, NULL); |
379 |
< |
} |
380 |
< |
sem->sem_waiting = (struct _pthread_descr_struct *)getpid(); |
381 |
< |
sigemptyset(&mask); |
347 |
< |
sigsuspend(&mask); |
348 |
< |
sem->sem_waiting = NULL; |
367 |
> |
if (sem == NULL) { |
368 |
> |
errno = EINVAL; |
369 |
> |
return -1; |
370 |
> |
} |
371 |
> |
fastlock_acquire(&sem->sem_lock); |
372 |
> |
if (sem->sem_value > 0) { |
373 |
> |
sem->sem_value--; |
374 |
> |
fastlock_release(&sem->sem_lock); |
375 |
> |
return 0; |
376 |
> |
} |
377 |
> |
sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1); |
378 |
> |
while (sem->sem_value == 0) { |
379 |
> |
fastlock_release(&sem->sem_lock); |
380 |
> |
usleep(0); |
381 |
> |
fastlock_acquire(&sem->sem_lock); |
382 |
|
} |
383 |
< |
release_spinlock(&sem->sem_lock.spinlock); |
383 |
> |
sem->sem_value--; |
384 |
> |
fastlock_release(&sem->sem_lock); |
385 |
|
return 0; |
386 |
|
} |
387 |
|
|
392 |
|
|
393 |
|
int sem_post(sem_t *sem) |
394 |
|
{ |
395 |
< |
acquire_spinlock(&sem->sem_lock.spinlock); |
396 |
< |
if (sem->sem_waiting == NULL) |
397 |
< |
atomic_add((int *)&sem->sem_value, 1); |
398 |
< |
else |
399 |
< |
kill((pid_t)sem->sem_waiting, sem->sem_lock.status); |
400 |
< |
release_spinlock(&sem->sem_lock.spinlock); |
395 |
> |
if (sem == NULL) { |
396 |
> |
errno = EINVAL; |
397 |
> |
return -1; |
398 |
> |
} |
399 |
> |
fastlock_acquire(&sem->sem_lock); |
400 |
> |
if (sem->sem_waiting) |
401 |
> |
sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1); |
402 |
> |
else { |
403 |
> |
if (sem->sem_value >= SEM_VALUE_MAX) { |
404 |
> |
errno = ERANGE; |
405 |
> |
fastlock_release(&sem->sem_lock); |
406 |
> |
return -1; |
407 |
> |
} |
408 |
> |
} |
409 |
> |
sem->sem_value++; |
410 |
> |
fastlock_release(&sem->sem_lock); |
411 |
|
return 0; |
412 |
|
} |
413 |
|
|