Comparing SheepShaver/src/Unix/Linux/sheepthreads.c (file contents):
Revision 1.1 by cebix, 2002-02-04T16:58:13Z vs.
Revision 1.9 by gbeauche, 2005-07-03T23:28:30Z

# Line 2 | Line 2
2   *  sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't
3   *                   like nonstandard stacks)
4   *
5 < *  SheepShaver (C) 1997-2002 Christian Bauer and Marc Hellwig
5 > *  SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
6   *
7   *  This program is free software; you can redistribute it and/or modify
8   *  it under the terms of the GNU General Public License as published by
# Line 33 | Line 33
33   #include <errno.h>
34   #include <unistd.h>
35   #include <signal.h>
36 #include <sched.h>
36   #include <pthread.h>
38 #include <semaphore.h>
37  
38  
39   /* Thread stack size */
# Line 51 | Line 49 | extern int test_and_set(int *var, int va
49   extern int __clone(int (*fn)(void *), void *, int, void *);
50  
51   /* struct sem_t */
52 + typedef struct {
53 +        struct _pthread_fastlock __sem_lock;
54 +        int __sem_value;
55 +        _pthread_descr __sem_waiting;
56 + } sem_t;
57 +
58 + #define SEM_VALUE_MAX 64
59 + #define status __status
60 + #define spinlock __spinlock
61   #define sem_lock __sem_lock
62   #define sem_value __sem_value
63   #define sem_waiting __sem_waiting
64  
65 + /* Wait for "clone" children only (Linux 2.4+ specific) */
66 + #ifndef __WCLONE
67 + #define __WCLONE 0
68 + #endif
69 +
70  
71   /*
72   *  Return pthread ID of self
# Line 135 | Line 147 | int pthread_create(pthread_t *thread, co
147   int pthread_join(pthread_t thread, void **ret)
148   {
149          do {
150 <                if (waitpid(thread, NULL, 0) >= 0)
150 >                if (waitpid(thread, NULL, __WCLONE) >= 0);
151                          break;
152          } while (errno == EINTR);
153          if (ret)
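
The change above is what the __WCLONE fallback defined earlier in this revision is for: a child created with clone() that does not request SIGCHLD on exit is only reported by waitpid() when __WCLONE is passed. A minimal standalone demonstration of that behaviour, illustrative only and not part of this diff (it uses glibc's clone() wrapper and locally chosen names rather than the file's __clone() extern), could look like this:

/* Illustrative sketch only, not part of the diff.  A child created with
 * clone() and no termination signal is invisible to a plain waitpid();
 * passing __WCLONE makes waitpid() report it. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#define CHILD_STACK_SIZE 65536

static int child_fn(void *arg)
{
        return 0;                       /* exit immediately */
}

int main(void)
{
        char *stack = malloc(CHILD_STACK_SIZE);
        /* No SIGCHLD in the flags, so this is a "clone" child in the
         * sense of wait(2). */
        int pid = clone(child_fn, stack + CHILD_STACK_SIZE,
                        CLONE_VM | CLONE_FS | CLONE_FILES, NULL);
        if (pid < 0) {
                perror("clone");
                return 1;
        }
        if (waitpid(pid, NULL, 0) < 0)
                perror("waitpid without __WCLONE");     /* typically ECHILD */
        if (waitpid(pid, NULL, __WCLONE) >= 0)
                printf("reaped child %d with __WCLONE\n", pid);
        free(stack);
        return 0;
}
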
# Line 168 | Line 180 | void pthread_testcancel(void)
180   *  Spinlocks
181   */
182  
183 < static void acquire_spinlock(volatile int *lock)
183 > /* For multiprocessor systems, we want to ensure all memory accesses
184 >   are completed before we reset a lock.  On other systems, we still
185 >   need to make sure that the compiler has flushed everything to memory.  */
186 > #define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
187 >
188 > static void fastlock_init(struct _pthread_fastlock *lock)
189   {
190 <        do {
191 <                while (*lock) ;
192 <        } while (test_and_set((int *)lock, 1) != 0);
190 >        lock->status = 0;
191 >        lock->spinlock = 0;
192 > }
193 >
194 > static int fastlock_try_acquire(struct _pthread_fastlock *lock)
195 > {
196 >        int res = EBUSY;
197 >        if (test_and_set(&lock->spinlock, 1) == 0) {
198 >                if (lock->status == 0) {
199 >                        lock->status = 1;
200 >                        MEMORY_BARRIER();
201 >                        res = 0;
202 >                }
203 >                lock->spinlock = 0;
204 >        }
205 >        return res;
206 > }
207 >
208 > static void fastlock_acquire(struct _pthread_fastlock *lock)
209 > {
210 >        MEMORY_BARRIER();
211 >        while (test_and_set(&lock->spinlock, 1))
212 >                usleep(0);
213 > }
214 >
215 > static void fastlock_release(struct _pthread_fastlock *lock)
216 > {
217 >        MEMORY_BARRIER();
218 >        lock->spinlock = 0;
219 >        __asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock));
220 > }
221 >
222 >
223 > /*
224 > *  Initialize mutex
225 > */
226 >
227 > int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr)
228 > {
229 >        fastlock_init(&mutex->__m_lock);
230 >        mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP;
231 >        mutex->__m_count = 0;
232 >        mutex->__m_owner = NULL;
233 >        return 0;
234 > }
235 >
236 >
237 > /*
238 > *  Destroy mutex
239 > */
240 >
241 > int pthread_mutex_destroy(pthread_mutex_t *mutex)
242 > {
243 >        switch (mutex->__m_kind) {
244 >        case PTHREAD_MUTEX_TIMED_NP:
245 >                return (mutex->__m_lock.__status != 0) ? EBUSY : 0;
246 >        default:
247 >                return EINVAL;
248 >        }
249 > }
250 >
251 >
252 > /*
253 > *  Lock mutex
254 > */
255 >
256 > int pthread_mutex_lock(pthread_mutex_t *mutex)
257 > {
258 >        switch (mutex->__m_kind) {
259 >        case PTHREAD_MUTEX_TIMED_NP:
260 >                fastlock_acquire(&mutex->__m_lock);
261 >                return 0;
262 >        default:
263 >                return EINVAL;
264 >        }
265   }
266  
267 < static void release_spinlock(int *lock)
267 >
268 > /*
269 > *  Try to lock mutex
270 > */
271 >
272 > int pthread_mutex_trylock(pthread_mutex_t *mutex)
273   {
274 <        *lock = 0;
274 >        switch (mutex->__m_kind) {
275 >        case PTHREAD_MUTEX_TIMED_NP:
276 >                return fastlock_try_acquire(&mutex->__m_lock);
277 >        default:
278 >                return EINVAL;
279 >        }
280 > }
281 >
282 >
283 > /*
284 > *  Unlock mutex
285 > */
286 >
287 > int pthread_mutex_unlock(pthread_mutex_t *mutex)
288 > {
289 >        switch (mutex->__m_kind) {
290 >        case PTHREAD_MUTEX_TIMED_NP:
291 >                fastlock_release(&mutex->__m_lock);
292 >                return 0;
293 >        default:
294 >                return EINVAL;
295 >        }
296 > }
297 >
298 >
299 > /*
300 > *  Create mutex attribute
301 > */
302 >
303 > int pthread_mutexattr_init(pthread_mutexattr_t *attr)
304 > {
305 >        attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
306 >        return 0;
307 > }
308 >
309 >
310 > /*
311 > *  Destroy mutex attribute
312 > */
313 >
314 > int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
315 > {
316 >        return 0;
317   }
318  
319  
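The fastlock helpers added in this hunk replace the old acquire_spinlock()/release_spinlock() pair: a spin word now only guards a separate status word, and the PowerPC "sync" instruction provides the memory barrier. As a rough portable rendering of the same try-acquire logic, illustrative only and not part of this diff, one could use GCC's __sync builtins in place of the external test_and_set() primitive and the "sync" barrier:

/* Illustrative sketch only, not part of the diff.  Same logic as
 * fastlock_try_acquire() above, with GCC __sync builtins standing in
 * for test_and_set() and MEMORY_BARRIER().  The names are local to
 * this example. */
#include <errno.h>

struct toy_fastlock {
        int status;             /* 0 = lock free, 1 = lock held */
        int spinlock;           /* guards status */
};

static int toy_fastlock_try_acquire(struct toy_fastlock *lock)
{
        int res = EBUSY;
        if (__sync_lock_test_and_set(&lock->spinlock, 1) == 0) {
                if (lock->status == 0) {
                        lock->status = 1;
                        __sync_synchronize();           /* full memory barrier */
                        res = 0;
                }
                __sync_lock_release(&lock->spinlock);   /* release the spin word */
        }
        return res;
}

fastlock_acquire() and fastlock_release() in the hunk follow the same pattern, with a usleep(0) yield while spinning.
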
# Line 187 | Line 323 | static void release_spinlock(int *lock)
323  
324   int sem_init(sem_t *sem, int pshared, unsigned int value)
325   {
326 <        sem->sem_lock.status = 0;
327 <        sem->sem_lock.spinlock = 0;
326 >        if (sem == NULL || value > SEM_VALUE_MAX) {
327 >                errno = EINVAL;
328 >                return -1;
329 >        }
330 >        if (pshared) {
331 >                errno = ENOSYS;
332 >                return -1;
333 >        }
334 >        fastlock_init(&sem->sem_lock);
335          sem->sem_value = value;
336          sem->sem_waiting = NULL;
337          return 0;
# Line 201 | Line 344 | int sem_init(sem_t *sem, int pshared, un
344  
345   int sem_destroy(sem_t *sem)
346   {
347 +        if (sem == NULL) {
348 +                errno = EINVAL;
349 +                return -1;
350 +        }
351 +        if (sem->sem_waiting) {
352 +                errno = EBUSY;
353 +                return -1;
354 +        }
355 +        sem->sem_value = 0;
356 +        sem->sem_waiting = NULL;
357          return 0;
358   }
359  
# Line 209 | Line 362 | int sem_destroy(sem_t *sem)
362   *  Wait on semaphore
363   */
364  
212 void null_handler(int sig)
213 {
214 }
215
365   int sem_wait(sem_t *sem)
366   {
367 <        acquire_spinlock(&sem->sem_lock.spinlock);
368 <        if (atomic_add((int *)&sem->sem_value, -1) >= 0) {
369 <                sigset_t mask;
221 <                if (!sem->sem_lock.status) {
222 <                        struct sigaction sa;
223 <                        sem->sem_lock.status = SIGUSR2;
224 <                        sa.sa_handler = null_handler;
225 <                        sa.sa_flags = SA_RESTART;
226 <                        sigemptyset(&sa.sa_mask);
227 <                        sigaction(sem->sem_lock.status, &sa, NULL);
228 <                }
229 <                sem->sem_waiting = (struct _pthread_descr_struct *)getpid();
230 <                sigemptyset(&mask);
231 <                sigsuspend(&mask);
232 <                sem->sem_waiting = NULL;
367 >        if (sem == NULL) {
368 >                errno = EINVAL;
369 >                return -1;
370          }
371 <        release_spinlock(&sem->sem_lock.spinlock);
371 >        fastlock_acquire(&sem->sem_lock);
372 >        if (sem->sem_value > 0) {
373 >                sem->sem_value--;
374 >                fastlock_release(&sem->sem_lock);
375 >                return 0;
376 >        }
377 >        sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1);
378 >        while (sem->sem_value == 0) {
379 >                fastlock_release(&sem->sem_lock);
380 >                usleep(0);
381 >                fastlock_acquire(&sem->sem_lock);
382 >        }
383 >        sem->sem_value--;
384 >        fastlock_release(&sem->sem_lock);
385          return 0;
386   }
387  
# Line 242 | Line 392 | int sem_wait(sem_t *sem)
392  
393   int sem_post(sem_t *sem)
394   {
395 <        acquire_spinlock(&sem->sem_lock.spinlock);
396 <        atomic_add((int *)&sem->sem_value, 1);
395 >        if (sem == NULL) {
396 >                errno = EINVAL;
397 >                return -1;
398 >        }
399 >        fastlock_acquire(&sem->sem_lock);
400          if (sem->sem_waiting)
401 <                kill((pid_t)sem->sem_waiting, sem->sem_lock.status);
402 <        release_spinlock(&sem->sem_lock.spinlock);
401 >                sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1);
402 >        else {
403 >                if (sem->sem_value >= SEM_VALUE_MAX) {
404 >                        errno = ERANGE;
405 >                        fastlock_release(&sem->sem_lock);
406 >                        return -1;
407 >                }
408 >        }
409 >        sem->sem_value++;
410 >        fastlock_release(&sem->sem_lock);
411 >        return 0;
412 > }
413 >
414 >
415 > /*
416 > *  Simple producer/consumer test program
417 > */
418 >
419 > #ifdef TEST
420 > #include <stdio.h>
421 >
422 > static sem_t p_sem, c_sem;
423 > static int data = 0;
424 >
425 > static void *producer_func(void *arg)
426 > {
427 >        int i, n = (int)arg;
428 >        for (i = 0; i < n; i++) {
429 >                sem_wait(&p_sem);
430 >                data++;
431 >                sem_post(&c_sem);
432 >        }
433 >        return NULL;
434 > }
435 >
436 > static void *consumer_func(void *arg)
437 > {
438 >        int i, n = (int)arg;
439 >        for (i = 0; i < n; i++) {
440 >                sem_wait(&c_sem);
441 >                printf("data: %d\n", data);
442 >                sem_post(&p_sem);
443 >        }
444 >        sleep(1); // for testing pthread_join()
445 >        return NULL;
446 > }
447 >
448 > int main(void)
449 > {
450 >        pthread_t producer_thread, consumer_thread;
451 >        static const int N = 5;
452 >
453 >        if (sem_init(&c_sem, 0, 0) < 0)
454 >                return 1;
455 >        if (sem_init(&p_sem, 0, 1) < 0)
456 >                return 2;
457 >        if (pthread_create(&producer_thread, NULL, producer_func, (void *)N) != 0)
458 >                return 3;
459 >        if (pthread_create(&consumer_thread, NULL, consumer_func, (void *)N) != 0)
460 >                return 4;
461 >        pthread_join(producer_thread, NULL);
462 >        pthread_join(consumer_thread, NULL);
463 >        sem_destroy(&p_sem);
464 >        sem_destroy(&c_sem);
465 >        if (data != N)
466 >                return 5;
467          return 0;
468   }
469 + #endif
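
A last note on the semaphore rewrite: the __sem_waiting field is declared as a _pthread_descr pointer (matching the old LinuxThreads sem_t layout) but is repurposed here as a plain waiter count via pointer arithmetic, and waiters now poll with usleep(0) instead of sleeping on a signal. The same logic with an honest integer counter, illustrative only and not part of this diff (local names throughout, error checking omitted, GCC __sync builtins standing in for the external test_and_set()):

/* Illustrative sketch only, not part of the diff.  The same polling
 * counting-semaphore logic as sem_wait()/sem_post() above, with the
 * waiter count kept in a plain int instead of being packed into the
 * __sem_waiting pointer. */
#include <unistd.h>

typedef struct {
        int lock;               /* spin lock guarding value and waiting */
        int value;              /* current semaphore count */
        int waiting;            /* number of threads polling in wait */
} toy_sem_t;

static void toy_lock(toy_sem_t *sem)
{
        while (__sync_lock_test_and_set(&sem->lock, 1))
                usleep(0);      /* yield while spinning, as fastlock_acquire() does */
}

static void toy_unlock(toy_sem_t *sem)
{
        __sync_lock_release(&sem->lock);
}

static int toy_sem_wait(toy_sem_t *sem)
{
        toy_lock(sem);
        if (sem->value > 0) {           /* fast path: a count is available */
                sem->value--;
                toy_unlock(sem);
                return 0;
        }
        sem->waiting++;                 /* slow path: register as a waiter */
        while (sem->value == 0) {       /* poll until a post arrives */
                toy_unlock(sem);
                usleep(0);
                toy_lock(sem);
        }
        sem->value--;                   /* the matching post already dropped "waiting" */
        toy_unlock(sem);
        return 0;
}

static int toy_sem_post(toy_sem_t *sem)
{
        toy_lock(sem);
        if (sem->waiting)
                sem->waiting--;         /* hand the new count to a polling waiter */
        sem->value++;
        toy_unlock(sem);
        return 0;
}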

Diff Legend

  (old line number only)   Removed lines
  +                        Added lines
  <                        Changed lines (revision 1.1)
  >                        Changed lines (revision 1.9)