ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/SheepShaver/src/Unix/Linux/sheepthreads.c
Revision: 1.8
Committed: 2005-06-25T11:36:35Z (19 years, 5 months ago) by gbeauche
Content type: text/plain
Branch: MAIN
Changes since 1.7: +0 -3 lines
Log Message:
clean-ups from previous experiment with sched_yield(), this one caused a
slow-down too.

File Contents

# Content
1 /*
2 * sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't
3 * like nonstandard stacks)
4 *
5 * SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 /*
23 * NOTES:
24 * - pthread_cancel() kills the thread immediately
25 * - Semaphores are VERY restricted: the only supported use is to have one
26 * thread sem_wait() on the semaphore while other threads sem_post() it
27 * (i.e. to use the semaphore as a signal)
28 */
29
30 #include <sys/types.h>
31 #include <sys/wait.h>
32 #include <stdlib.h>
33 #include <errno.h>
34 #include <unistd.h>
35 #include <signal.h>
36 #include <pthread.h>
37 #include <semaphore.h>
38
39
40 /* Thread stack size */
41 #define STACK_SIZE 65536
42
43 /* From asm_linux.S */
44 extern int atomic_add(int *var, int add);
45 extern int atomic_and(int *var, int and);
46 extern int atomic_or(int *var, int or);
47 extern int test_and_set(int *var, int val);
48
49 /* Linux kernel calls */
50 extern int __clone(int (*fn)(void *), void *, int, void *);
51
52 /* struct sem_t */
53 #define status __status
54 #define spinlock __spinlock
55 #define sem_lock __sem_lock
56 #define sem_value __sem_value
57 #define sem_waiting __sem_waiting
58
59 /* Wait for "clone" children only (Linux 2.4+ specific) */
60 #ifndef __WCLONE
61 #define __WCLONE 0
62 #endif
63
64
65 /*
66 * Return pthread ID of self
67 */
68
69 pthread_t pthread_self(void)
70 {
71 return getpid();
72 }
73
74
75 /*
76 * Test whether two pthread IDs are equal
77 */
78
79 int pthread_equal(pthread_t t1, pthread_t t2)
80 {
81 return t1 == t2;
82 }
83
84
85 /*
86 * Send signal to thread
87 */
88
89 int pthread_kill(pthread_t thread, int sig)
90 {
91 if (kill(thread, sig) == -1)
92 return errno;
93 else
94 return 0;
95 }
96
97
98 /*
99 * Create pthread
100 */
101
102 struct new_thread {
103 void *(*fn)(void *);
104 void *arg;
105 };
106
107 static int start_thread(void *arg)
108 {
109 struct new_thread *nt = (struct new_thread *)arg;
110 nt->fn(nt->arg);
111 return 0;
112 }
113
114 int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
115 {
116 struct new_thread *nt;
117 void *stack;
118 int pid;
119
120 nt = (struct new_thread *)malloc(sizeof(struct new_thread));
121 nt->fn = start_routine;
122 nt->arg = arg;
123 stack = malloc(STACK_SIZE);
124
125 pid = __clone(start_thread, (char *)stack + STACK_SIZE - 16, CLONE_VM | CLONE_FS | CLONE_FILES, nt);
126 if (pid == -1) {
127 free(stack);
128 free(nt);
129 return errno;
130 } else {
131 *thread = pid;
132 return 0;
133 }
134 }
135
136
137 /*
138 * Join pthread
139 */
140
141 int pthread_join(pthread_t thread, void **ret)
142 {
143 do {
144 if (waitpid(thread, NULL, __WCLONE) >= 0);
145 break;
146 } while (errno == EINTR);
147 if (ret)
148 *ret = NULL;
149 return 0;
150 }
151
152
153 /*
154 * Cancel thread
155 */
156
157 int pthread_cancel(pthread_t thread)
158 {
159 kill(thread, SIGINT);
160 return 0;
161 }
162
163
164 /*
165 * Test for cancellation
166 */
167
168 void pthread_testcancel(void)
169 {
170 }
171
172
173 /*
174 * Spinlocks
175 */
176
177 /* For multiprocessor systems, we want to ensure all memory accesses
178 are completed before we reset a lock. On other systems, we still
179 need to make sure that the compiler has flushed everything to memory. */
180 #define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")
181
182 static void fastlock_init(struct _pthread_fastlock *lock)
183 {
184 lock->status = 0;
185 lock->spinlock = 0;
186 }
187
188 static int fastlock_try_acquire(struct _pthread_fastlock *lock)
189 {
190 int res = EBUSY;
191 if (test_and_set(&lock->spinlock, 1) == 0) {
192 if (lock->status == 0) {
193 lock->status = 1;
194 MEMORY_BARRIER();
195 res = 0;
196 }
197 lock->spinlock = 0;
198 }
199 return res;
200 }
201
202 static void fastlock_acquire(struct _pthread_fastlock *lock)
203 {
204 MEMORY_BARRIER();
205 while (test_and_set(&lock->spinlock, 1))
206 usleep(0);
207 }
208
209 static void fastlock_release(struct _pthread_fastlock *lock)
210 {
211 MEMORY_BARRIER();
212 lock->spinlock = 0;
213 __asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock));
214 }
215
216
217 /*
218 * Initialize mutex
219 */
220
221 int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr)
222 {
223 fastlock_init(&mutex->__m_lock);
224 mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP;
225 mutex->__m_count = 0;
226 mutex->__m_owner = NULL;
227 return 0;
228 }
229
230
231 /*
232 * Destroy mutex
233 */
234
235 int pthread_mutex_destroy(pthread_mutex_t *mutex)
236 {
237 switch (mutex->__m_kind) {
238 case PTHREAD_MUTEX_TIMED_NP:
239 return (mutex->__m_lock.__status != 0) ? EBUSY : 0;
240 default:
241 return EINVAL;
242 }
243 }
244
245
246 /*
247 * Lock mutex
248 */
249
250 int pthread_mutex_lock(pthread_mutex_t *mutex)
251 {
252 switch (mutex->__m_kind) {
253 case PTHREAD_MUTEX_TIMED_NP:
254 fastlock_acquire(&mutex->__m_lock);
255 return 0;
256 default:
257 return EINVAL;
258 }
259 }
260
261
262 /*
263 * Try to lock mutex
264 */
265
266 int pthread_mutex_trylock(pthread_mutex_t *mutex)
267 {
268 switch (mutex->__m_kind) {
269 case PTHREAD_MUTEX_TIMED_NP:
270 return fastlock_try_acquire(&mutex->__m_lock);
271 default:
272 return EINVAL;
273 }
274 }
275
276
277 /*
278 * Unlock mutex
279 */
280
281 int pthread_mutex_unlock(pthread_mutex_t *mutex)
282 {
283 switch (mutex->__m_kind) {
284 case PTHREAD_MUTEX_TIMED_NP:
285 fastlock_release(&mutex->__m_lock);
286 return 0;
287 default:
288 return EINVAL;
289 }
290 }
291
292
293 /*
294 * Create mutex attribute
295 */
296
297 int pthread_mutexattr_init(pthread_mutexattr_t *attr)
298 {
299 attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
300 return 0;
301 }
302
303
304 /*
305 * Destroy mutex attribute
306 */
307
308 int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
309 {
310 return 0;
311 }
312
313
314 /*
315 * Init semaphore
316 */
317
318 int sem_init(sem_t *sem, int pshared, unsigned int value)
319 {
320 if (sem == NULL || value > SEM_VALUE_MAX) {
321 errno = EINVAL;
322 return -1;
323 }
324 if (pshared) {
325 errno = ENOSYS;
326 return -1;
327 }
328 fastlock_init(&sem->sem_lock);
329 sem->sem_value = value;
330 sem->sem_waiting = NULL;
331 return 0;
332 }
333
334
335 /*
336 * Delete remaphore
337 */
338
339 int sem_destroy(sem_t *sem)
340 {
341 if (sem == NULL) {
342 errno = EINVAL;
343 return -1;
344 }
345 if (sem->sem_waiting) {
346 errno = EBUSY;
347 return -1;
348 }
349 sem->sem_value = 0;
350 sem->sem_waiting = NULL;
351 return 0;
352 }
353
354
355 /*
356 * Wait on semaphore
357 */
358
359 int sem_wait(sem_t *sem)
360 {
361 if (sem == NULL) {
362 errno = EINVAL;
363 return -1;
364 }
365 fastlock_acquire(&sem->sem_lock);
366 if (sem->sem_value > 0) {
367 sem->sem_value--;
368 fastlock_release(&sem->sem_lock);
369 return 0;
370 }
371 sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1);
372 while (sem->sem_value == 0) {
373 fastlock_release(&sem->sem_lock);
374 usleep(0);
375 fastlock_acquire(&sem->sem_lock);
376 }
377 sem->sem_value--;
378 fastlock_release(&sem->sem_lock);
379 return 0;
380 }
381
382
383 /*
384 * Post semaphore
385 */
386
387 int sem_post(sem_t *sem)
388 {
389 if (sem == NULL) {
390 errno = EINVAL;
391 return -1;
392 }
393 fastlock_acquire(&sem->sem_lock);
394 if (sem->sem_waiting)
395 sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1);
396 else {
397 if (sem->sem_value >= SEM_VALUE_MAX) {
398 errno = ERANGE;
399 fastlock_release(&sem->sem_lock);
400 return -1;
401 }
402 }
403 sem->sem_value++;
404 fastlock_release(&sem->sem_lock);
405 return 0;
406 }
407
408
409 /*
410 * Simple producer/consumer test program
411 */
412
413 #ifdef TEST
414 #include <stdio.h>
415
416 static sem_t p_sem, c_sem;
417 static int data = 0;
418
419 static void *producer_func(void *arg)
420 {
421 int i, n = (int)arg;
422 for (i = 0; i < n; i++) {
423 sem_wait(&p_sem);
424 data++;
425 sem_post(&c_sem);
426 }
427 return NULL;
428 }
429
430 static void *consumer_func(void *arg)
431 {
432 int i, n = (int)arg;
433 for (i = 0; i < n; i++) {
434 sem_wait(&c_sem);
435 printf("data: %d\n", data);
436 sem_post(&p_sem);
437 }
438 sleep(1); // for testing pthread_join()
439 return NULL;
440 }
441
442 int main(void)
443 {
444 pthread_t producer_thread, consumer_thread;
445 static const int N = 5;
446
447 if (sem_init(&c_sem, 0, 0) < 0)
448 return 1;
449 if (sem_init(&p_sem, 0, 1) < 0)
450 return 2;
451 if (pthread_create(&producer_thread, NULL, producer_func, (void *)N) != 0)
452 return 3;
453 if (pthread_create(&consumer_thread, NULL, consumer_func, (void *)N) != 0)
454 return 4;
455 pthread_join(producer_thread, NULL);
456 pthread_join(consumer_thread, NULL);
457 sem_destroy(&p_sem);
458 sem_destroy(&c_sem);
459 if (data != N)
460 return 5;
461 return 0;
462 }
463 #endif