   *  sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't
   *                   like nonstandard stacks)
   *
<  *  SheepShaver (C) 1997-2004 Christian Bauer and Marc Hellwig
>  *  SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
   *
   *  This program is free software; you can redistribute it and/or modify
   *  it under the terms of the GNU General Public License as published by

...

  #include <errno.h>
  #include <unistd.h>
  #include <signal.h>
- #include <sched.h>
  #include <pthread.h>
  #include <semaphore.h>

...

   *  Spinlocks
   */

< static int try_acquire_spinlock(int *lock)
> /* For multiprocessor systems, we want to ensure all memory accesses
>    are completed before we reset a lock. On other systems, we still
>    need to make sure that the compiler has flushed everything to memory. */
> #define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
>
> static void fastlock_init(struct _pthread_fastlock *lock)
  {
<     return test_and_set(lock, 1) == 0;
>     lock->status = 0;
>     lock->spinlock = 0;
  }

< static void acquire_spinlock(volatile int *lock)
> static int fastlock_try_acquire(struct _pthread_fastlock *lock)
  {
<     do {
<         while (*lock) ;
<     } while (test_and_set((int *)lock, 1) != 0);
>     int res = EBUSY;
>     if (test_and_set(&lock->spinlock, 1) == 0) {
>         if (lock->status == 0) {
>             lock->status = 1;
>             MEMORY_BARRIER();
>             res = 0;
>         }
>         lock->spinlock = 0;
>     }
>     return res;
> }
>
> static void fastlock_acquire(struct _pthread_fastlock *lock)
> {
>     MEMORY_BARRIER();
>     while (test_and_set(&lock->spinlock, 1))
>         usleep(0);
  }

< static void release_spinlock(int *lock)
> static void fastlock_release(struct _pthread_fastlock *lock)
  {
<     *lock = 0;
>     MEMORY_BARRIER();
>     lock->spinlock = 0;
>     __asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock));
  }
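
Both the old spinlocks and the new fastlocks sit on top of a test_and_set() primitive this hunk does not show; the old code already calls it, and it is presumably declared elsewhere in the file as extern int test_and_set(int *var, int val) with an assembly implementation. The "sync" in MEMORY_BARRIER() marks this as PowerPC, where the usual shape of such a primitive is a lwarx/stwcx. reservation loop. The following is only a sketch of the semantics the fastlock code assumes (the name test_and_set_sketch and the trailing isync are illustrative, not the project's actual code; a portable stand-in would be GCC's __sync_lock_test_and_set):

    /* Sketch: atomically store val into *var and return the previous
       value, so a return of 0 means the caller took the lock. */
    static int test_and_set_sketch(int *var, int val)
    {
        int old;
        __asm__ __volatile__(
            "1: lwarx   %0,0,%1\n"   /* load *var and take a reservation */
            "   stwcx.  %2,0,%1\n"   /* try to store val atomically */
            "   bne-    1b\n"        /* reservation lost: retry */
            "   isync\n"             /* acquire barrier once the store sticks */
            : "=&r" (old)
            : "r" (var), "r" (val)
            : "cr0", "memory");
        return old;
    }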

...

  int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr)
  {
<     // pthread_init_lock
<     mutex->__m_lock.__status = 0;
<     mutex->__m_lock.__spinlock = 0;
<
>     fastlock_init(&mutex->__m_lock);
      mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP;
      mutex->__m_count = 0;
      mutex->__m_owner = NULL;

...

  {
      switch (mutex->__m_kind) {
      case PTHREAD_MUTEX_TIMED_NP:
<         acquire_spinlock(&mutex->__m_lock.__spinlock);
>         fastlock_acquire(&mutex->__m_lock);
          return 0;
      default:
          return EINVAL;

...

  {
      switch (mutex->__m_kind) {
      case PTHREAD_MUTEX_TIMED_NP:
<         if (!try_acquire_spinlock(&mutex->__m_lock.__spinlock))
<             return EBUSY;
<         return 0;
>         return fastlock_try_acquire(&mutex->__m_lock);
      default:
          return EINVAL;
      }

...

  {
      switch (mutex->__m_kind) {
      case PTHREAD_MUTEX_TIMED_NP:
<         release_spinlock(&mutex->__m_lock.__spinlock);
>         fastlock_release(&mutex->__m_lock);
          return 0;
      default:
          return EINVAL;
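
These three hunks are evidently the bodies of pthread_mutex_lock(), pthread_mutex_trylock(), and pthread_mutex_unlock(); each now delegates to the fastlock layer, and only PTHREAD_MUTEX_TIMED_NP mutexes are handled, with every other kind failing as EINVAL. A minimal usage sketch under that constraint (counter_lock, counter_setup, and counter_bump are invented names, not part of the file):

    static pthread_mutex_t counter_lock;
    static int counter;

    static void counter_setup(void)
    {
        /* A NULL attribute selects PTHREAD_MUTEX_TIMED_NP, the only
           kind these entry points accept. */
        pthread_mutex_init(&counter_lock, NULL);
    }

    static void counter_bump(void)
    {
        pthread_mutex_lock(&counter_lock);    /* spins in fastlock_acquire() */
        counter++;
        pthread_mutex_unlock(&counter_lock);  /* fastlock_release() */
    }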

...

  int sem_init(sem_t *sem, int pshared, unsigned int value)
  {
<     sem->sem_lock.status = 0;
<     sem->sem_lock.spinlock = 0;
>     if (sem == NULL || value > SEM_VALUE_MAX) {
>         errno = EINVAL;
>         return -1;
>     }
>     if (pshared) {
>         errno = ENOSYS;
>         return -1;
>     }
>     fastlock_init(&sem->sem_lock);
      sem->sem_value = value;
      sem->sem_waiting = NULL;
      return 0;
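
sem_init() now validates its arguments before touching the semaphore: an initial value above SEM_VALUE_MAX is rejected with EINVAL, and process-shared semaphores are refused with ENOSYS. A quick assert-based probe of those paths (a sketch; sem_init_probe is not part of the file):

    #include <assert.h>

    static void sem_init_probe(void)
    {
        sem_t s;
        int rc = sem_init(&s, 1, 0);          /* pshared is unsupported */
        assert(rc == -1 && errno == ENOSYS);
        rc = sem_init(&s, 0, SEM_VALUE_MAX);  /* largest legal initial value */
        assert(rc == 0);
    }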

...

  int sem_destroy(sem_t *sem)
  {
+     if (sem == NULL) {
+         errno = EINVAL;
+         return -1;
+     }
+     if (sem->sem_waiting) {
+         errno = EBUSY;
+         return -1;
+     }
+     sem->sem_value = 0;
+     sem->sem_waiting = NULL;
      return 0;
  }
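
This hunk is pure addition: sem_destroy() used to return 0 unconditionally, and it now rejects a NULL semaphore and refuses with EBUSY to tear down a semaphore that still has waiters. For example (illustrative only):

    static int sem_destroy_probe(void)
    {
        sem_t s;
        sem_init(&s, 0, 3);
        return sem_destroy(&s);   /* 0: nobody is waiting, teardown succeeds */
    }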

...

   *  Wait on semaphore
   */

- void null_handler(int sig)
- {
- }
-
  int sem_wait(sem_t *sem)
  {
<     acquire_spinlock(&sem->sem_lock.spinlock);
<     if (sem->sem_value > 0)
<         atomic_add((int *)&sem->sem_value, -1);
<     else {
<         sigset_t mask;
<         if (!sem->sem_lock.status) {
<             struct sigaction sa;
<             sem->sem_lock.status = SIGUSR2;
<             sa.sa_handler = null_handler;
<             sa.sa_flags = SA_RESTART;
<             sigemptyset(&sa.sa_mask);
<             sigaction(sem->sem_lock.status, &sa, NULL);
<         }
<         sem->sem_waiting = (struct _pthread_descr_struct *)getpid();
<         sigemptyset(&mask);
<         sigsuspend(&mask);
<         sem->sem_waiting = NULL;
>     int cnt = 0;
>     struct timespec tm;
>
>     if (sem == NULL) {
>         errno = EINVAL;
>         return -1;
>     }
>     fastlock_acquire(&sem->sem_lock);
>     if (sem->sem_value > 0) {
>         sem->sem_value--;
>         fastlock_release(&sem->sem_lock);
>         return 0;
>     }
>     sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1);
>     while (sem->sem_value == 0) {
>         fastlock_release(&sem->sem_lock);
>         usleep(0);
>         fastlock_acquire(&sem->sem_lock);
      }
<     release_spinlock(&sem->sem_lock.spinlock);
>     sem->sem_value--;
>     fastlock_release(&sem->sem_lock);
      return 0;
  }
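
Two details are worth noting in the new sem_wait(). Blocking no longer parks the caller in sigsuspend() waiting for sem_post() to send it a signal; the thread simply yields with usleep(0) and re-checks sem_value under the lock, which is why null_handler() could go (the cnt and tm locals look like unused leftovers of the rewrite). And sem_waiting, a thread-descriptor pointer in the apparently borrowed LinuxThreads sem_t layout, is reused here as a plain waiter count carried in the pointer bits. A pair of hypothetical helpers makes the trick explicit (not part of the file):

    /* Both helpers assume sem_lock is held by the caller. */
    static long sem_waiters(sem_t *sem)
    {
        return (long)sem->sem_waiting;        /* the pointer bits hold a count */
    }

    static void sem_waiters_add(sem_t *sem, long delta)
    {
        sem->sem_waiting = (struct _pthread_descr_struct *)
                           ((long)sem->sem_waiting + delta);
    }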

...

  int sem_post(sem_t *sem)
  {
<     acquire_spinlock(&sem->sem_lock.spinlock);
<     if (sem->sem_waiting == NULL)
<         atomic_add((int *)&sem->sem_value, 1);
<     else
<         kill((pid_t)sem->sem_waiting, sem->sem_lock.status);
<     release_spinlock(&sem->sem_lock.spinlock);
>     if (sem == NULL) {
>         errno = EINVAL;
>         return -1;
>     }
>     fastlock_acquire(&sem->sem_lock);
>     if (sem->sem_waiting)
>         sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1);
>     else {
>         if (sem->sem_value >= SEM_VALUE_MAX) {
>             errno = ERANGE;
>             fastlock_release(&sem->sem_lock);
>             return -1;
>         }
>     }
>     sem->sem_value++;
>     fastlock_release(&sem->sem_lock);
      return 0;
  }
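
A small end-to-end sketch of the rewritten semaphore path, assuming the rest of sheepthreads.c provides pthread_create() and pthread_join() as the upstream file does (worker and ready are invented names): the worker spins inside sem_wait() until the main thread's sem_post() decrements the waiter count and bumps sem_value.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t ready;

    static void *worker(void *arg)
    {
        (void)arg;
        sem_wait(&ready);            /* yields via usleep(0) until posted */
        puts("worker released");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        sem_init(&ready, 0, 0);      /* pshared must be 0 here */
        pthread_create(&t, NULL, worker, NULL);
        sem_post(&ready);            /* waiter count drops, sem_value rises */
        pthread_join(t, NULL);
        sem_destroy(&ready);
        return 0;
    }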