1 |
cebix |
1.1 |
/* |
2 |
|
|
* sheepthreads.c - Minimal pthreads implementation (libpthreads doesn't |
3 |
|
|
* like nonstandard stacks) |
4 |
|
|
* |
5 |
gbeauche |
1.6 |
* SheepShaver (C) 1997-2005 Christian Bauer and Marc Hellwig |
6 |
cebix |
1.1 |
* |
7 |
|
|
* This program is free software; you can redistribute it and/or modify |
8 |
|
|
* it under the terms of the GNU General Public License as published by |
9 |
|
|
* the Free Software Foundation; either version 2 of the License, or |
10 |
|
|
* (at your option) any later version. |
11 |
|
|
* |
12 |
|
|
* This program is distributed in the hope that it will be useful, |
13 |
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 |
|
|
* GNU General Public License for more details. |
16 |
|
|
* |
17 |
|
|
* You should have received a copy of the GNU General Public License |
18 |
|
|
* along with this program; if not, write to the Free Software |
19 |
|
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 |
|
|
*/ |
21 |
|
|
|
22 |
|
|
/* |
23 |
|
|
* NOTES: |
24 |
|
|
* - pthread_cancel() kills the thread immediately |
25 |
|
|
* - Semaphores are VERY restricted: the only supported use is to have one |
26 |
|
|
* thread sem_wait() on the semaphore while other threads sem_post() it |
27 |
|
|
* (i.e. to use the semaphore as a signal) |
28 |
|
|
*/ |
29 |
|
|
|
30 |
|
|
#include <sys/types.h> |
31 |
|
|
#include <sys/wait.h> |
32 |
|
|
#include <stdlib.h> |
33 |
|
|
#include <errno.h> |
34 |
|
|
#include <unistd.h> |
35 |
|
|
#include <signal.h> |
36 |
|
|
#include <sched.h> |
37 |
|
|
#include <pthread.h> |
38 |
|
|
#include <semaphore.h> |
39 |
|
|
|
40 |
|
|
|
41 |
|
|
/* Thread stack size */ |
42 |
|
|
#define STACK_SIZE 65536 |
43 |
|
|
|
44 |
|
|
/* From asm_linux.S */ |
45 |
|
|
extern int atomic_add(int *var, int add); |
46 |
|
|
extern int atomic_and(int *var, int and); |
47 |
|
|
extern int atomic_or(int *var, int or); |
48 |
|
|
extern int test_and_set(int *var, int val); |
49 |
|
|
|
50 |
|
|
/* Linux kernel calls */ |
51 |
|
|
extern int __clone(int (*fn)(void *), void *, int, void *); |
52 |
|
|
|
53 |
|
|
/* struct sem_t */ |
54 |
gbeauche |
1.2 |
#define status __status |
55 |
|
|
#define spinlock __spinlock |
56 |
cebix |
1.1 |
#define sem_lock __sem_lock |
57 |
|
|
#define sem_value __sem_value |
58 |
|
|
#define sem_waiting __sem_waiting |
59 |
|
|
|
60 |
gbeauche |
1.4 |
/* Wait for "clone" children only (Linux 2.4+ specific) */ |
61 |
|
|
#ifndef __WCLONE |
62 |
|
|
#define __WCLONE 0 |
63 |
|
|
#endif |
64 |
|
|
|
65 |
cebix |
1.1 |
|
66 |
|
|
/* |
67 |
|
|
* Return pthread ID of self |
68 |
|
|
*/ |
69 |
|
|
|
70 |
|
|
pthread_t pthread_self(void)
{
	/* Every thread is a full clone'd process here, so the kernel PID
	   doubles as the pthread ID. */
	return getpid();
}
74 |
|
|
|
75 |
|
|
|
76 |
|
|
/* |
77 |
|
|
* Test whether two pthread IDs are equal |
78 |
|
|
*/ |
79 |
|
|
|
80 |
|
|
int pthread_equal(pthread_t t1, pthread_t t2)
{
	/* IDs are plain PIDs, so plain integer comparison suffices */
	return (t1 == t2) ? 1 : 0;
}
84 |
|
|
|
85 |
|
|
|
86 |
|
|
/* |
87 |
|
|
* Send signal to thread |
88 |
|
|
*/ |
89 |
|
|
|
90 |
|
|
/*
 *  Send signal to thread.  Threads are processes, so this is a thin
 *  wrapper around kill(); returns 0 on success or the errno value.
 */

int pthread_kill(pthread_t thread, int sig)
{
	if (kill(thread, sig) == 0)
		return 0;
	return errno;
}
97 |
|
|
|
98 |
|
|
|
99 |
|
|
/* |
100 |
|
|
* Create pthread |
101 |
|
|
*/ |
102 |
|
|
|
103 |
|
|
/* Start-up descriptor handed from pthread_create() to the clone child:
   the user's start routine and its argument. */
struct new_thread {
	void *(*fn)(void *);	/* thread start routine */
	void *arg;		/* argument passed to fn */
};
107 |
|
|
|
108 |
|
|
/* Clone entry point: unwrap the new_thread descriptor and run the user
   routine.  The routine's return value is discarded (pthread_join()
   always reports NULL).
   NOTE(review): the descriptor and the thread's stack are never freed,
   so every created thread leaks both -- confirm this is intentional. */
static int start_thread(void *arg)
{
	struct new_thread *nt = (struct new_thread *)arg;
	nt->fn(nt->arg);
	return 0;
}
114 |
|
|
|
115 |
|
|
int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) |
116 |
|
|
{ |
117 |
|
|
struct new_thread *nt; |
118 |
|
|
void *stack; |
119 |
|
|
int pid; |
120 |
|
|
|
121 |
|
|
nt = (struct new_thread *)malloc(sizeof(struct new_thread)); |
122 |
|
|
nt->fn = start_routine; |
123 |
|
|
nt->arg = arg; |
124 |
|
|
stack = malloc(STACK_SIZE); |
125 |
|
|
|
126 |
|
|
pid = __clone(start_thread, (char *)stack + STACK_SIZE - 16, CLONE_VM | CLONE_FS | CLONE_FILES, nt); |
127 |
|
|
if (pid == -1) { |
128 |
|
|
free(stack); |
129 |
|
|
free(nt); |
130 |
|
|
return errno; |
131 |
|
|
} else { |
132 |
|
|
*thread = pid; |
133 |
|
|
return 0; |
134 |
|
|
} |
135 |
|
|
} |
136 |
|
|
|
137 |
|
|
|
138 |
|
|
/* |
139 |
|
|
* Join pthread |
140 |
|
|
*/ |
141 |
|
|
|
142 |
|
|
/*
 *  Join pthread: reap the clone child with waitpid(), retrying on
 *  EINTR.  Thread exit values are not supported, so *ret is always
 *  set to NULL.  Always returns 0.
 */

int pthread_join(pthread_t thread, void **ret)
{
	do {
		/* Original had a stray ';' after this if, which made the
		   break unconditional and the EINTR retry loop dead code */
		if (waitpid(thread, NULL, __WCLONE) >= 0)
			break;
	} while (errno == EINTR);
	if (ret)
		*ret = NULL;
	return 0;
}
152 |
|
|
|
153 |
|
|
|
154 |
|
|
/* |
155 |
|
|
* Cancel thread |
156 |
|
|
*/ |
157 |
|
|
|
158 |
|
|
/* Cancel thread: no deferred cancellation -- the target process is
   killed on the spot with SIGINT (see NOTES at top of file).  The
   kill() result is ignored; always returns 0. */
int pthread_cancel(pthread_t thread)
{
	kill(thread, SIGINT);
	return 0;
}
163 |
|
|
|
164 |
|
|
|
165 |
|
|
/* |
166 |
|
|
* Test for cancellation |
167 |
|
|
*/ |
168 |
|
|
|
169 |
|
|
/* Test for cancellation: a no-op, since pthread_cancel() kills the
   thread immediately and there is no pending-cancel state to poll. */
void pthread_testcancel(void)
{
}
172 |
|
|
|
173 |
|
|
|
174 |
|
|
/* |
175 |
|
|
* Spinlocks |
176 |
|
|
*/ |
177 |
|
|
|
178 |
gbeauche |
1.3 |
/* Try to take the spinlock without blocking; returns nonzero on
   success.  test_and_set() (asm_linux.S) returns the previous value,
   so 0 means the lock was free and is now ours. */
static int try_acquire_spinlock(int *lock)
{
	return test_and_set(lock, 1) == 0;
}
182 |
|
|
|
183 |
cebix |
1.1 |
/* Busy-wait until the spinlock is acquired.  The inner loop spins on a
   plain (volatile) read until the lock looks free, and only then tries
   the atomic test_and_set -- keeps the expensive atomic off the hot
   spin path.  Retries if another thread grabbed it in between. */
static void acquire_spinlock(volatile int *lock)
{
	do {
		while (*lock) ;
	} while (test_and_set((int *)lock, 1) != 0);
}
189 |
|
|
|
190 |
|
|
/* Release the spinlock: a plain store of 0 is the unlock on this
   architecture (no barrier is issued here -- NOTE(review): confirm the
   target memory model makes this safe). */
static void release_spinlock(int *lock)
{
	*lock = 0;
}
194 |
|
|
|
195 |
|
|
|
196 |
|
|
/* |
197 |
|
|
* Initialize mutex |
198 |
|
|
*/ |
199 |
|
|
|
200 |
|
|
int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *mutex_attr) |
201 |
|
|
{ |
202 |
|
|
// pthread_init_lock |
203 |
|
|
mutex->__m_lock.__status = 0; |
204 |
|
|
mutex->__m_lock.__spinlock = 0; |
205 |
|
|
|
206 |
|
|
mutex->__m_kind = mutex_attr ? mutex_attr->__mutexkind : PTHREAD_MUTEX_TIMED_NP; |
207 |
|
|
mutex->__m_count = 0; |
208 |
|
|
mutex->__m_owner = NULL; |
209 |
|
|
return 0; |
210 |
|
|
} |
211 |
|
|
|
212 |
|
|
|
213 |
|
|
/* |
214 |
|
|
* Destroy mutex |
215 |
|
|
*/ |
216 |
|
|
|
217 |
|
|
int pthread_mutex_destroy(pthread_mutex_t *mutex) |
218 |
|
|
{ |
219 |
|
|
switch (mutex->__m_kind) { |
220 |
|
|
case PTHREAD_MUTEX_TIMED_NP: |
221 |
|
|
return (mutex->__m_lock.__status != 0) ? EBUSY : 0; |
222 |
|
|
default: |
223 |
|
|
return EINVAL; |
224 |
|
|
} |
225 |
|
|
} |
226 |
|
|
|
227 |
|
|
|
228 |
|
|
/* |
229 |
|
|
* Lock mutex |
230 |
|
|
*/ |
231 |
|
|
|
232 |
|
|
int pthread_mutex_lock(pthread_mutex_t *mutex) |
233 |
|
|
{ |
234 |
|
|
switch (mutex->__m_kind) { |
235 |
|
|
case PTHREAD_MUTEX_TIMED_NP: |
236 |
|
|
acquire_spinlock(&mutex->__m_lock.__spinlock); |
237 |
|
|
return 0; |
238 |
|
|
default: |
239 |
|
|
return EINVAL; |
240 |
|
|
} |
241 |
|
|
} |
242 |
|
|
|
243 |
|
|
|
244 |
|
|
/* |
245 |
|
|
* Try to lock mutex |
246 |
|
|
*/ |
247 |
|
|
|
248 |
|
|
int pthread_mutex_trylock(pthread_mutex_t *mutex) |
249 |
|
|
{ |
250 |
|
|
switch (mutex->__m_kind) { |
251 |
|
|
case PTHREAD_MUTEX_TIMED_NP: |
252 |
|
|
if (!try_acquire_spinlock(&mutex->__m_lock.__spinlock)) |
253 |
|
|
return EBUSY; |
254 |
|
|
return 0; |
255 |
|
|
default: |
256 |
|
|
return EINVAL; |
257 |
|
|
} |
258 |
|
|
} |
259 |
|
|
|
260 |
|
|
|
261 |
|
|
/* |
262 |
|
|
* Unlock mutex |
263 |
|
|
*/ |
264 |
|
|
|
265 |
|
|
int pthread_mutex_unlock(pthread_mutex_t *mutex) |
266 |
|
|
{ |
267 |
|
|
switch (mutex->__m_kind) { |
268 |
|
|
case PTHREAD_MUTEX_TIMED_NP: |
269 |
|
|
release_spinlock(&mutex->__m_lock.__spinlock); |
270 |
|
|
return 0; |
271 |
|
|
default: |
272 |
|
|
return EINVAL; |
273 |
|
|
} |
274 |
|
|
} |
275 |
|
|
|
276 |
|
|
|
277 |
|
|
/* |
278 |
|
|
* Create mutex attribute |
279 |
|
|
*/ |
280 |
|
|
|
281 |
|
|
/* Create mutex attribute: the only supported kind is
   PTHREAD_MUTEX_TIMED_NP, so that is what every attribute gets. */
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
	return 0;
}
286 |
|
|
|
287 |
|
|
|
288 |
|
|
/* |
289 |
|
|
* Destroy mutex attribute |
290 |
|
|
*/ |
291 |
|
|
|
292 |
|
|
/* Destroy mutex attribute: nothing to release, always succeeds. */
int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	return 0;
}
296 |
|
|
|
297 |
|
|
|
298 |
|
|
/* |
299 |
|
|
* Init semaphore |
300 |
|
|
*/ |
301 |
|
|
|
302 |
|
|
int sem_init(sem_t *sem, int pshared, unsigned int value) |
303 |
|
|
{ |
304 |
|
|
sem->sem_lock.status = 0; |
305 |
|
|
sem->sem_lock.spinlock = 0; |
306 |
|
|
sem->sem_value = value; |
307 |
|
|
sem->sem_waiting = NULL; |
308 |
|
|
return 0; |
309 |
|
|
} |
310 |
|
|
|
311 |
|
|
|
312 |
|
|
/* |
313 |
|
|
 * Delete semaphore
314 |
|
|
*/ |
315 |
|
|
|
316 |
|
|
/* Delete semaphore: no resources to free, always succeeds.  Destroying
   a semaphore that still has a waiter is not detected. */
int sem_destroy(sem_t *sem)
{
	return 0;
}
320 |
|
|
|
321 |
|
|
|
322 |
|
|
/* |
323 |
|
|
* Wait on semaphore |
324 |
|
|
*/ |
325 |
|
|
|
326 |
|
|
/* Do-nothing signal handler: installed by sem_wait() only so that the
   wake-up signal interrupts sigsuspend() instead of killing the
   process. */
void null_handler(int sig)
{
}
329 |
|
|
|
330 |
|
|
/* Wait on semaphore.  If the value is positive it is decremented and we
   return immediately; otherwise the caller registers itself as THE
   single waiter (see NOTES: only one waiter is supported) and sleeps in
   sigsuspend() until sem_post() signals it.
   On first use the wake-up signal (SIGUSR2) is chosen, stored in the
   lock status field, and a null handler with SA_RESTART is installed
   for it.
   NOTE(review): sem_waiting abuses the descriptor pointer field to
   store a PID.  Also, the spinlock is held across sigsuspend(), yet
   sem_post() acquires the same spinlock before it can signal -- verify
   how this avoids deadlocking when a waiter is asleep. */
int sem_wait(sem_t *sem)
{
	acquire_spinlock(&sem->sem_lock.spinlock);
	if (sem->sem_value > 0)
		atomic_add((int *)&sem->sem_value, -1);
	else {
		sigset_t mask;
		if (!sem->sem_lock.status) {
			/* First wait ever: pick the wake-up signal and install
			   its (empty) handler */
			struct sigaction sa;
			sem->sem_lock.status = SIGUSR2;
			sa.sa_handler = null_handler;
			sa.sa_flags = SA_RESTART;
			sigemptyset(&sa.sa_mask);
			sigaction(sem->sem_lock.status, &sa, NULL);
		}
		/* PID smuggled through the descriptor pointer field */
		sem->sem_waiting = (struct _pthread_descr_struct *)getpid();
		sigemptyset(&mask);
		sigsuspend(&mask);	/* sleep until any signal arrives */
		sem->sem_waiting = NULL;
	}
	release_spinlock(&sem->sem_lock.spinlock);
	return 0;
}
353 |
|
|
|
354 |
|
|
|
355 |
|
|
/* |
356 |
|
|
* Post semaphore |
357 |
|
|
*/ |
358 |
|
|
|
359 |
|
|
int sem_post(sem_t *sem) |
360 |
|
|
{ |
361 |
|
|
acquire_spinlock(&sem->sem_lock.spinlock); |
362 |
gbeauche |
1.4 |
if (sem->sem_waiting == NULL) |
363 |
|
|
atomic_add((int *)&sem->sem_value, 1); |
364 |
|
|
else |
365 |
cebix |
1.1 |
kill((pid_t)sem->sem_waiting, sem->sem_lock.status); |
366 |
|
|
release_spinlock(&sem->sem_lock.spinlock); |
367 |
|
|
return 0; |
368 |
|
|
} |
369 |
gbeauche |
1.4 |
|
370 |
|
|
|
371 |
|
|
/* |
372 |
|
|
* Simple producer/consumer test program |
373 |
|
|
*/ |
374 |
|
|
|
375 |
|
|
#ifdef TEST |
376 |
|
|
#include <stdio.h> |
377 |
|
|
|
378 |
|
|
static sem_t p_sem, c_sem; |
379 |
|
|
static int data = 0; |
380 |
|
|
|
381 |
|
|
static void *producer_func(void *arg) |
382 |
|
|
{ |
383 |
|
|
int i, n = (int)arg; |
384 |
|
|
for (i = 0; i < n; i++) { |
385 |
|
|
sem_wait(&p_sem); |
386 |
|
|
data++; |
387 |
|
|
sem_post(&c_sem); |
388 |
|
|
} |
389 |
|
|
return NULL; |
390 |
|
|
} |
391 |
|
|
|
392 |
|
|
static void *consumer_func(void *arg) |
393 |
|
|
{ |
394 |
|
|
int i, n = (int)arg; |
395 |
|
|
for (i = 0; i < n; i++) { |
396 |
|
|
sem_wait(&c_sem); |
397 |
|
|
printf("data: %d\n", data); |
398 |
|
|
sem_post(&p_sem); |
399 |
|
|
} |
400 |
|
|
sleep(1); // for testing pthread_join() |
401 |
|
|
return NULL; |
402 |
|
|
} |
403 |
|
|
|
404 |
|
|
/* Test driver: producer/consumer ping-pong.  c_sem starts at 0 (consumer
   blocks first), p_sem starts at 1 (producer runs first); after N rounds
   the shared counter must equal N.  Distinct return codes identify which
   step failed. */
int main(void)
{
	pthread_t producer_thread, consumer_thread;
	static const int N = 5;

	if (sem_init(&c_sem, 0, 0) < 0)
		return 1;
	if (sem_init(&p_sem, 0, 1) < 0)
		return 2;
	if (pthread_create(&producer_thread, NULL, producer_func, (void *)N) != 0)
		return 3;
	if (pthread_create(&consumer_thread, NULL, consumer_func, (void *)N) != 0)
		return 4;
	pthread_join(producer_thread, NULL);
	pthread_join(consumer_thread, NULL);
	sem_destroy(&p_sem);
	sem_destroy(&c_sem);
	if (data != N)
		return 5;
	return 0;
}
425 |
|
|
#endif |