  2      * sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't
  3      * like nonstandard stacks)
  4      *
  5  <   * SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
  5  >   * SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
  6      *
  7      * This program is free software; you can redistribute it and/or modify
  8      * it under the terms of the GNU General Public License as published by
 33     #include <errno.h>
 34     #include <unistd.h>
 35     #include <signal.h>
 36  -  #include <sched.h>
 36     #include <pthread.h>
 37     #include <semaphore.h>
 38
 56     #define sem_value __sem_value
 57     #define sem_waiting __sem_waiting
 58
 59  +  /* Wait for "clone" children only (Linux 2.4+ specific) */
 60  +  #ifndef __WCLONE
 61  +  #define __WCLONE 0
 62  +  #endif
 63  +
 64
 65     /*
 66      * Return pthread ID of self
141     int pthread_join(pthread_t thread, void **ret)
142     {
143     	do {
144  <  		if (waitpid(thread, NULL, 0) >= 0)
144  >  		if (waitpid(thread, NULL, __WCLONE) >= 0)
145     			break;
146     	} while (errno == EINTR);
147     	if (ret)
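
The join loop above relies on __WCLONE because this library evidently creates its threads with Linux clone() rather than fork(): a clone child that does not request SIGCHLD is invisible to a plain waitpid(pid, ..., 0), so the flag (or the #ifndef fallback to 0 added earlier in this diff) is what lets pthread_join() actually reap the thread. A self-contained sketch of that pattern, with hypothetical names and a fixed stack size, assuming glibc's clone() wrapper:

/* Hypothetical illustration, not part of sheepthreads.c. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef __WCLONE
#define __WCLONE 0
#endif

static int thread_func(void *arg)          /* the "thread" body */
{
	puts("child running");
	return 0;
}

int main(void)
{
	char *stack = malloc(65536);
	if (stack == NULL)
		return 1;
	/* CLONE_VM shares the address space; no termination signal is requested,
	   so waitpid() only sees this child when __WCLONE is passed. */
	pid_t tid = clone(thread_func, stack + 65536, CLONE_VM, NULL);
	if (tid < 0)
		return 1;
	while (waitpid(tid, NULL, __WCLONE) < 0 && errno == EINTR)
		;                                  /* retry on EINTR, as pthread_join() does */
	free(stack);
	return 0;
}
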
174      * Spinlocks
175      */
176
177  <  static void acquire_spinlock(volatile int *lock)
177  >  /* For multiprocessor systems, we want to ensure all memory accesses
178  >     are completed before we reset a lock. On other systems, we still
179  >     need to make sure that the compiler has flushed everything to memory. */
180  >  #define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
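
The "sync" in MEMORY_BARRIER() is a PowerPC instruction, which fits this Linux-specific file running on a PowerPC host; the "memory" clobber additionally stops the compiler from reordering or caching accesses across the barrier. On a compiler with the GCC __sync builtins, a portable stand-in (an assumption, not what the file uses) would be:

#define MEMORY_BARRIER() __sync_synchronize()   /* full hardware + compiler barrier */
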
181  >
182  >  static void fastlock_init(struct _pthread_fastlock *lock)
183     {
184  <  	do {
185  <  		while (*lock) ;
186  <  	} while (test_and_set((int *)lock, 1) != 0);
184  >  	lock->status = 0;
185  >  	lock->spinlock = 0;
186  >  }
187  >
188  >  static int fastlock_try_acquire(struct _pthread_fastlock *lock)
189  >  {
190  >  	int res = EBUSY;
191  >  	if (test_and_set(&lock->spinlock, 1) == 0) {
192  >  		if (lock->status == 0) {
193  >  			lock->status = 1;
194  >  			MEMORY_BARRIER();
195  >  			res = 0;
196  >  		}
197  >  		lock->spinlock = 0;
198  >  	}
199  >  	return res;
200  >  }
201  >
202  >  static void fastlock_acquire(struct _pthread_fastlock *lock)
203  >  {
204  >  	MEMORY_BARRIER();
205  >  	while (test_and_set(&lock->spinlock, 1))
206  >  		usleep(0);
207  >  }
208  >
209  >  static void fastlock_release(struct _pthread_fastlock *lock)
210  >  {
211  >  	MEMORY_BARRIER();
212  >  	lock->spinlock = 0;
213  >  	__asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock));
214  >  }
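
All of the fastlock routines above hinge on a test_and_set() primitive that is defined elsewhere in sheepthreads.c and does not appear in this diff. A minimal stand-in built on GCC's __sync builtins (a hypothetical equivalent, not the project's own implementation), together with the acquire/release protocol it supports:

#include <stdio.h>

static volatile int spinlock = 0;

/* Atomically store v into *p and return the previous value, which is what
   the fastlock code expects from test_and_set(). */
static int test_and_set(volatile int *p, int v)
{
	return __sync_lock_test_and_set(p, v);
}

int main(void)
{
	/* acquire: loop until the previous value was 0, i.e. the lock was free */
	while (test_and_set(&spinlock, 1))
		;
	puts("spinlock held");
	/* release: order earlier stores, then clear the lock word */
	__sync_synchronize();
	spinlock = 0;
	return 0;
}
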
215  >
216  >
217  >  /*
218  >   * Initialize mutex
219  >   */
220  >
221  >  int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr)
222  >  {
223  >  	fastlock_init(&mutex->__m_lock);
224  >  	mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP;
225  >  	mutex->__m_count = 0;
226  >  	mutex->__m_owner = NULL;
227  >  	return 0;
228  >  }
229  >
230  >
231  >  /*
232  >   * Destroy mutex
233  >   */
234  >
235  >  int pthread_mutex_destroy(pthread_mutex_t *mutex)
236  >  {
237  >  	switch (mutex->__m_kind) {
238  >  	case PTHREAD_MUTEX_TIMED_NP:
239  >  		return (mutex->__m_lock.__status != 0) ? EBUSY : 0;
240  >  	default:
241  >  		return EINVAL;
242  >  	}
243  >  }
244  >
245  >
246  >  /*
247  >   * Lock mutex
248  >   */
249  >
250  >  int pthread_mutex_lock(pthread_mutex_t *mutex)
251  >  {
252  >  	switch (mutex->__m_kind) {
253  >  	case PTHREAD_MUTEX_TIMED_NP:
254  >  		fastlock_acquire(&mutex->__m_lock);
255  >  		return 0;
256  >  	default:
257  >  		return EINVAL;
258  >  	}
259  >  }
260  >
261  >
262  >  /*
263  >   * Try to lock mutex
264  >   */
265  >
266  >  int pthread_mutex_trylock(pthread_mutex_t *mutex)
267  >  {
268  >  	switch (mutex->__m_kind) {
269  >  	case PTHREAD_MUTEX_TIMED_NP:
270  >  		return fastlock_try_acquire(&mutex->__m_lock);
271  >  	default:
272  >  		return EINVAL;
273  >  	}
274  >  }
275  >
276  >
277  >  /*
278  >   * Unlock mutex
279  >   */
280  >
281  >  int pthread_mutex_unlock(pthread_mutex_t *mutex)
282  >  {
283  >  	switch (mutex->__m_kind) {
284  >  	case PTHREAD_MUTEX_TIMED_NP:
285  >  		fastlock_release(&mutex->__m_lock);
286  >  		return 0;
287  >  	default:
288  >  		return EINVAL;
289  >  	}
290     }
291
292  <  static void release_spinlock(int *lock)
292  >
293  >  /*
294  >   * Create mutex attribute
295  >   */
296  >
297  >  int pthread_mutexattr_init(pthread_mutexattr_t *attr)
298  >  {
299  >  	attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
300  >  	return 0;
301  >  }
302  >
303  >
304  >  /*
305  >   * Destroy mutex attribute
306  >   */
307  >
308  >  int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
309     {
310  <  	*lock = 0;
310  >  	return 0;
311     }
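
Only the default PTHREAD_MUTEX_TIMED_NP kind is recognized by the routines added above, so the supported path is to initialize with a NULL attribute or with one freshly set up by pthread_mutexattr_init(). A minimal usage sketch against that subset (hypothetical example code, not taken from SheepShaver):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t counter_lock;
static int counter;

int main(void)
{
	pthread_mutex_init(&counter_lock, NULL);   /* NULL attr -> PTHREAD_MUTEX_TIMED_NP */
	pthread_mutex_lock(&counter_lock);
	counter++;                                 /* critical section */
	pthread_mutex_unlock(&counter_lock);
	printf("counter = %d\n", counter);
	return pthread_mutex_destroy(&counter_lock);
}
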
312
313
317
318     int sem_init(sem_t *sem, int pshared, unsigned int value)
319     {
320  <  	sem->sem_lock.status = 0;
321  <  	sem->sem_lock.spinlock = 0;
320  >  	if (sem == NULL || value > SEM_VALUE_MAX) {
321  >  		errno = EINVAL;
322  >  		return -1;
323  >  	}
324  >  	if (pshared) {
325  >  		errno = ENOSYS;
326  >  		return -1;
327  >  	}
328  >  	fastlock_init(&sem->sem_lock);
329     	sem->sem_value = value;
330     	sem->sem_waiting = NULL;
331     	return 0;
338
339     int sem_destroy(sem_t *sem)
340     {
341  +  	if (sem == NULL) {
342  +  		errno = EINVAL;
343  +  		return -1;
344  +  	}
345  +  	if (sem->sem_waiting) {
346  +  		errno = EBUSY;
347  +  		return -1;
348  +  	}
349  +  	sem->sem_value = 0;
350  +  	sem->sem_waiting = NULL;
351     	return 0;
352     }
353
356      * Wait on semaphore
357      */
358
214  -  void null_handler(int sig)
215  -  {
216  -  }
217  -
359     int sem_wait(sem_t *sem)
360     {
361  <  	acquire_spinlock(&sem->sem_lock.spinlock);
362  <  	if (atomic_add((int *)&sem->sem_value, -1) >= 0) {
363  <  		sigset_t mask;
364  <  		if (!sem->sem_lock.status) {
365  <  			struct sigaction sa;
366  <  			sem->sem_lock.status = SIGUSR2;
367  <  			sa.sa_handler = null_handler;
368  <  			sa.sa_flags = SA_RESTART;
369  <  			sigemptyset(&sa.sa_mask);
370  <  			sigaction(sem->sem_lock.status, &sa, NULL);
371  <  		}
372  <  		sem->sem_waiting = (struct _pthread_descr_struct *)getpid();
373  <  		sigemptyset(&mask);
374  <  		sigsuspend(&mask);
375  <  		sem->sem_waiting = NULL;
361  >  	int cnt = 0;
362  >  	struct timespec tm;
363  >
364  >  	if (sem == NULL) {
365  >  		errno = EINVAL;
366  >  		return -1;
367  >  	}
368  >  	fastlock_acquire(&sem->sem_lock);
369  >  	if (sem->sem_value > 0) {
370  >  		sem->sem_value--;
371  >  		fastlock_release(&sem->sem_lock);
372  >  		return 0;
373  >  	}
374  >  	sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1);
375  >  	while (sem->sem_value == 0) {
376  >  		fastlock_release(&sem->sem_lock);
377  >  		usleep(0);
378  >  		fastlock_acquire(&sem->sem_lock);
379     	}
380  <  	release_spinlock(&sem->sem_lock.spinlock);
380  >  	sem->sem_value--;
381  >  	fastlock_release(&sem->sem_lock);
382     	return 0;
383     }
384
389
390     int sem_post(sem_t *sem)
391     {
392  <  	acquire_spinlock(&sem->sem_lock.spinlock);
393  <  	atomic_add((int *)&sem->sem_value, 1);
392  >  	if (sem == NULL) {
393  >  		errno = EINVAL;
394  >  		return -1;
395  >  	}
396  >  	fastlock_acquire(&sem->sem_lock);
397     	if (sem->sem_waiting)
398  <  		kill((pid_t)sem->sem_waiting, sem->sem_lock.status);
399  <  	release_spinlock(&sem->sem_lock.spinlock);
398  >  		sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1);
399  >  	else {
400  >  		if (sem->sem_value >= SEM_VALUE_MAX) {
401  >  			errno = ERANGE;
402  >  			fastlock_release(&sem->sem_lock);
403  >  			return -1;
404  >  		}
405  >  	}
406  >  	sem->sem_value++;
407  >  	fastlock_release(&sem->sem_lock);
408  >  	return 0;
409  >  }
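
In the rewritten sem_wait()/sem_post(), the sem_waiting field (declared by the glibc headers as a struct _pthread_descr_struct pointer) no longer holds a waiter's pid to kill(): it is cast back and forth to an integer and used as a count of threads blocked in sem_wait(), which now poll with usleep(0) instead of sleeping in sigsuspend(). A small model of that bookkeeping with an explicit integer counter (hypothetical code, locking omitted for brevity):

#include <stdio.h>

/* Model of the semaphore state used above, with the waiter count kept in a
   plain int instead of a cast pointer field. */
struct model_sem {
	int value;      /* sem_value */
	int waiting;    /* what (long)sem_waiting encodes */
};

static void model_post(struct model_sem *s)
{
	if (s->waiting)
		s->waiting--;   /* hand the increment to one polling waiter */
	s->value++;
}

static int model_wait(struct model_sem *s)
{
	if (s->value > 0) {
		s->value--;     /* sem_wait() fast path */
		return 0;
	}
	s->waiting++;       /* slow path: register as a waiter, then poll */
	return -1;
}

int main(void)
{
	struct model_sem s = { 0, 0 };
	model_wait(&s);     /* would block: registers as a waiter */
	model_post(&s);     /* releases the waiter: value becomes 1 */
	printf("value=%d waiting=%d\n", s.value, s.waiting);
	return 0;
}
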
410  >
411  >
412  >  /*
413  >   * Simple producer/consumer test program
414  >   */
415  >
416  >  #ifdef TEST
417  >  #include <stdio.h>
418  >
419  >  static sem_t p_sem, c_sem;
420  >  static int data = 0;
421  >
422  >  static void *producer_func(void *arg)
423  >  {
424  >  	int i, n = (int)arg;
425  >  	for (i = 0; i < n; i++) {
426  >  		sem_wait(&p_sem);
427  >  		data++;
428  >  		sem_post(&c_sem);
429  >  	}
430  >  	return NULL;
431  >  }
432  >
433  >  static void *consumer_func(void *arg)
434  >  {
435  >  	int i, n = (int)arg;
436  >  	for (i = 0; i < n; i++) {
437  >  		sem_wait(&c_sem);
438  >  		printf("data: %d\n", data);
439  >  		sem_post(&p_sem);
440  >  	}
441  >  	sleep(1); // for testing pthread_join()
442  >  	return NULL;
443  >  }
444  >
445  >  int main(void)
446  >  {
447  >  	pthread_t producer_thread, consumer_thread;
448  >  	static const int N = 5;
449  >
450  >  	if (sem_init(&c_sem, 0, 0) < 0)
451  >  		return 1;
452  >  	if (sem_init(&p_sem, 0, 1) < 0)
453  >  		return 2;
454  >  	if (pthread_create(&producer_thread, NULL, producer_func, (void *)N) != 0)
455  >  		return 3;
456  >  	if (pthread_create(&consumer_thread, NULL, consumer_func, (void *)N) != 0)
457  >  		return 4;
458  >  	pthread_join(producer_thread, NULL);
459  >  	pthread_join(consumer_thread, NULL);
460  >  	sem_destroy(&p_sem);
461  >  	sem_destroy(&c_sem);
462  >  	if (data != N)
463  >  		return 5;
464     	return 0;
465     }
466  +  #endif
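
The producer/consumer harness is only compiled when TEST is defined, so building it presumably comes down to something like "cc -DTEST sheepthreads.c -o sheepthreads_test"; an exit status of 0 means the consumer observed all N (5) increments, and each non-zero code identifies which setup step or the final data check failed.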