/*
 *  sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't
 *                   like nonstandard stacks)
 *
 *  SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 *  NOTES:
 *   - pthread_cancel() kills the thread immediately
 *   - Semaphores are VERY restricted: the only supported use is to have one
 *     thread sem_wait() on the semaphore while other threads sem_post() it
 *     (i.e. to use the semaphore as a signal)
 */

#include <sys/types.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <signal.h>
#include <sched.h>		/* for CLONE_VM, CLONE_FS, CLONE_FILES */
#include <pthread.h>


/* Thread stack size */
#define STACK_SIZE 65536

/* From asm_linux.S */
extern int atomic_add(int *var, int add);
extern int atomic_and(int *var, int and);
extern int atomic_or(int *var, int or);
extern int test_and_set(int *var, int val);

/* Linux kernel calls */
extern int __clone(int (*fn)(void *), void *, int, void *);

/* struct sem_t */
typedef struct {
	struct _pthread_fastlock __sem_lock;
	int __sem_value;
	_pthread_descr __sem_waiting;
} sem_t;

#define SEM_VALUE_MAX 64
#define status __status
#define spinlock __spinlock
#define sem_lock __sem_lock
#define sem_value __sem_value
#define sem_waiting __sem_waiting

/* Wait for "clone" children only (Linux 2.4+ specific) */
#ifndef __WCLONE
#define __WCLONE 0
#endif


/*
 *  Return pthread ID of self
 */

pthread_t pthread_self(void)
{
	return getpid();
}


/*
 *  Test whether two pthread IDs are equal
 */

int pthread_equal(pthread_t t1, pthread_t t2)
{
	return t1 == t2;
}


/*
 *  Send signal to thread
 */

int pthread_kill(pthread_t thread, int sig)
{
	if (kill(thread, sig) == -1)
		return errno;
	else
		return 0;
}


/*
 *  Create pthread
 */

struct new_thread {
	void *(*fn)(void *);
	void *arg;
};

static int start_thread(void *arg)
{
	struct new_thread *nt = (struct new_thread *)arg;
	nt->fn(nt->arg);
	return 0;
}

int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
{
	struct new_thread *nt;
	void *stack;
	int pid;

	nt = (struct new_thread *)malloc(sizeof(struct new_thread));
	stack = malloc(STACK_SIZE);
	if (nt == NULL || stack == NULL) {
		free(stack);
		free(nt);
		return EAGAIN;
	}
	nt->fn = start_routine;
	nt->arg = arg;

	/* The stack grows downwards, so hand clone() an address near the top of the block */
	pid = __clone(start_thread, (char *)stack + STACK_SIZE - 16, CLONE_VM | CLONE_FS | CLONE_FILES, nt);
	if (pid == -1) {
		free(stack);
		free(nt);
		return errno;
	} else {
		*thread = pid;
		return 0;
	}
}


/*
 *  Join pthread
 */

int pthread_join(pthread_t thread, void **ret)
{
	/* Retry waitpid() only when it was interrupted by a signal */
	do {
		if (waitpid(thread, NULL, __WCLONE) >= 0)
			break;
	} while (errno == EINTR);
	if (ret)
		*ret = NULL;
	return 0;
}


/*
 *  Cancel thread
 */

int pthread_cancel(pthread_t thread)
{
	kill(thread, SIGINT);
	return 0;
}


/*
 *  Test for cancellation
 */

void pthread_testcancel(void)
{
}


/*
 *  Spinlocks
 */

/* For multiprocessor systems, we want to ensure all memory accesses
   are completed before we reset a lock. On other systems, we still
   need to make sure that the compiler has flushed everything to memory. */
#define MEMORY_BARRIER() __asm__ __volatile__ ("sync" : : : "memory")

static void fastlock_init(struct _pthread_fastlock *lock)
{
	lock->status = 0;
	lock->spinlock = 0;
}

static int fastlock_try_acquire(struct _pthread_fastlock *lock)
{
	int res = EBUSY;
	if (test_and_set(&lock->spinlock, 1) == 0) {
		if (lock->status == 0) {
			lock->status = 1;
			MEMORY_BARRIER();
			res = 0;
		}
		lock->spinlock = 0;
	}
	return res;
}

static void fastlock_acquire(struct _pthread_fastlock *lock)
{
	MEMORY_BARRIER();
	while (test_and_set(&lock->spinlock, 1))
		usleep(0);
}

static void fastlock_release(struct _pthread_fastlock *lock)
{
	MEMORY_BARRIER();
	lock->spinlock = 0;
	__asm__ __volatile__ ("" : "=m" (lock->spinlock) : "m" (lock->spinlock));
}


/*
 *  Initialize mutex
 */

int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr)
{
	fastlock_init(&mutex->__m_lock);
	mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP;
	mutex->__m_count = 0;
	mutex->__m_owner = NULL;
	return 0;
}


/*
 *  Destroy mutex
 */

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	switch (mutex->__m_kind) {
		case PTHREAD_MUTEX_TIMED_NP:
			return (mutex->__m_lock.__status != 0) ? EBUSY : 0;
		default:
			return EINVAL;
	}
}


/*
 *  Lock mutex
 */

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	switch (mutex->__m_kind) {
		case PTHREAD_MUTEX_TIMED_NP:
			fastlock_acquire(&mutex->__m_lock);
			return 0;
		default:
			return EINVAL;
	}
}


/*
 *  Try to lock mutex
 */

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	switch (mutex->__m_kind) {
		case PTHREAD_MUTEX_TIMED_NP:
			return fastlock_try_acquire(&mutex->__m_lock);
		default:
			return EINVAL;
	}
}


/*
 *  Unlock mutex
 */

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	switch (mutex->__m_kind) {
		case PTHREAD_MUTEX_TIMED_NP:
			fastlock_release(&mutex->__m_lock);
			return 0;
		default:
			return EINVAL;
	}
}


/*
 *  Create mutex attribute
 */

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
	return 0;
}


/*
 *  Destroy mutex attribute
 */

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	return 0;
}
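

/*
 *  Illustrative sketch: one way a caller might use the mutex shim above to
 *  protect a shared counter. Only PTHREAD_MUTEX_TIMED_NP is handled; any
 *  other __m_kind makes the lock/unlock calls return EINVAL. The
 *  EXAMPLE_MUTEX guard and the names below are for illustration only and
 *  are never defined or used by the build.
 */
#ifdef EXAMPLE_MUTEX
static pthread_mutex_t counter_lock;	/* set up at runtime by pthread_mutex_init() */
static int counter = 0;

static void *bump_func(void *arg)
{
	int i;
	for (i = 0; i < 1000; i++) {
		pthread_mutex_lock(&counter_lock);	/* spins on the fastlock until free */
		counter++;
		pthread_mutex_unlock(&counter_lock);
	}
	return NULL;
}

static int example_mutex(void)
{
	pthread_t t1, t2;

	/* A NULL attribute defaults to PTHREAD_MUTEX_TIMED_NP, the only supported kind */
	pthread_mutex_init(&counter_lock, NULL);
	pthread_create(&t1, NULL, bump_func, NULL);
	pthread_create(&t2, NULL, bump_func, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return pthread_mutex_destroy(&counter_lock);
}
#endif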


/*
 *  Init semaphore
 */

int sem_init(sem_t *sem, int pshared, unsigned int value)
{
	if (sem == NULL || value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return -1;
	}
	if (pshared) {
		errno = ENOSYS;
		return -1;
	}
	fastlock_init(&sem->sem_lock);
	sem->sem_value = value;
	sem->sem_waiting = NULL;
	return 0;
}


/*
 *  Delete semaphore
 */

int sem_destroy(sem_t *sem)
{
	if (sem == NULL) {
		errno = EINVAL;
		return -1;
	}
	if (sem->sem_waiting) {
		errno = EBUSY;
		return -1;
	}
	sem->sem_value = 0;
	sem->sem_waiting = NULL;
	return 0;
}


/*
 *  Wait on semaphore
 */

int sem_wait(sem_t *sem)
{
	if (sem == NULL) {
		errno = EINVAL;
		return -1;
	}
	fastlock_acquire(&sem->sem_lock);
	if (sem->sem_value > 0) {
		sem->sem_value--;
		fastlock_release(&sem->sem_lock);
		return 0;
	}
	/* sem_waiting is (ab)used as a plain waiter counter, not as a thread descriptor */
	sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting + 1);
	/* Busy-wait for a post, dropping the lock so sem_post() can get in */
	while (sem->sem_value == 0) {
		fastlock_release(&sem->sem_lock);
		usleep(0);
		fastlock_acquire(&sem->sem_lock);
	}
	sem->sem_value--;
	fastlock_release(&sem->sem_lock);
	return 0;
}


/*
 *  Post semaphore
 */

int sem_post(sem_t *sem)
{
	if (sem == NULL) {
		errno = EINVAL;
		return -1;
	}
	fastlock_acquire(&sem->sem_lock);
	if (sem->sem_waiting) {
		/* A thread is spinning in sem_wait(); take it off the waiter count */
		sem->sem_waiting = (struct _pthread_descr_struct *)((long)sem->sem_waiting - 1);
	} else {
		if (sem->sem_value >= SEM_VALUE_MAX) {
			errno = ERANGE;
			fastlock_release(&sem->sem_lock);
			return -1;
		}
	}
	sem->sem_value++;
	fastlock_release(&sem->sem_lock);
	return 0;
}
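

/*
 *  Illustrative sketch of the only supported semaphore pattern (see NOTES at
 *  the top of this file): a single thread sem_wait()s while other threads
 *  sem_post() the semaphore as a wakeup signal. The EXAMPLE_SEM_SIGNAL guard
 *  and the names below are for illustration only and are never defined or
 *  used by the build.
 */
#ifdef EXAMPLE_SEM_SIGNAL
static sem_t wake_sem;			/* "signal" semaphore, initial value 0 */
static volatile int quit = 0;

static void *waiter_func(void *arg)
{
	while (!quit) {
		sem_wait(&wake_sem);	/* blocks (busy-waits) until someone posts */
		/* ... handle one wakeup ... */
	}
	return NULL;
}

static void example_sem_signal(void)
{
	pthread_t waiter;

	sem_init(&wake_sem, 0, 0);
	pthread_create(&waiter, NULL, waiter_func, NULL);

	sem_post(&wake_sem);		/* any other thread may signal the waiter */

	quit = 1;
	sem_post(&wake_sem);		/* wake the waiter one last time so it can exit */
	pthread_join(waiter, NULL);
	sem_destroy(&wake_sem);
}
#endif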


/*
 *  Simple producer/consumer test program
 */

#ifdef TEST
#include <stdio.h>

static sem_t p_sem, c_sem;
static int data = 0;

static void *producer_func(void *arg)
{
	int i, n = (int)arg;
	for (i = 0; i < n; i++) {
		sem_wait(&p_sem);
		data++;
		sem_post(&c_sem);
	}
	return NULL;
}

static void *consumer_func(void *arg)
{
	int i, n = (int)arg;
	for (i = 0; i < n; i++) {
		sem_wait(&c_sem);
		printf("data: %d\n", data);
		sem_post(&p_sem);
	}
	sleep(1);	// for testing pthread_join()
	return NULL;
}

int main(void)
{
	pthread_t producer_thread, consumer_thread;
	static const int N = 5;

	if (sem_init(&c_sem, 0, 0) < 0)
		return 1;
	if (sem_init(&p_sem, 0, 1) < 0)
		return 2;
	if (pthread_create(&producer_thread, NULL, producer_func, (void *)N) != 0)
		return 3;
	if (pthread_create(&consumer_thread, NULL, consumer_func, (void *)N) != 0)
		return 4;
	pthread_join(producer_thread, NULL);
	pthread_join(consumer_thread, NULL);
	sem_destroy(&p_sem);
	sem_destroy(&c_sem);
	if (data != N)
		return 5;
	return 0;
}
#endif