224 |
|
static void *tick_func(void *arg); |
225 |
|
#if EMULATED_PPC |
226 |
|
static void sigusr2_handler(int sig); |
227 |
+ |
extern void emul_ppc(uint32 start); |
228 |
+ |
extern void init_emul_ppc(void); |
229 |
+ |
extern void exit_emul_ppc(void); |
230 |
|
#else |
231 |
|
static void sigusr2_handler(int sig, sigcontext_struct *sc); |
232 |
|
static void sigsegv_handler(int sig, sigcontext_struct *sc); |
235 |
|
|
236 |
|
|
237 |
|
// From asm_linux.S |
238 |
< |
#if EMULATED_PPC |
236 |
< |
extern int atomic_add(int *var, int v); |
237 |
< |
extern int atomic_and(int *var, int v); |
238 |
< |
extern int atomic_or(int *var, int v); |
239 |
< |
#else |
238 |
> |
#if !EMULATED_PPC |
239 |
|
extern "C" void *get_toc(void); |
240 |
|
extern "C" void *get_sp(void); |
241 |
|
extern "C" void flush_icache_range(void *start, void *end); |
250 |
|
#endif |
251 |
|
|
252 |
|
|
253 |
+ |
#if EMULATED_PPC |
254 |
+ |
/* |
255 |
+ |
* Atomic operations |
256 |
+ |
*/ |
257 |
+ |
|
258 |
+ |
#if HAVE_SPINLOCKS
// Lock serializing the emulated atomic read-modify-write operations below
static spinlock_t atomic_ops_lock = SPIN_LOCK_UNLOCKED;
#else
// No spinlock support: the "atomic" operations degrade to plain ones.
// NOTE(review): in this configuration they are NOT safe against concurrent
// access — confirm single-threaded use is guaranteed here.
#define spin_lock(LOCK)
#define spin_unlock(LOCK)
#endif

// Add v to *var under the lock; returns the previous value of *var
int atomic_add(int *var, int v)
{
	spin_lock(&atomic_ops_lock);
	const int prev = *var;
	*var = prev + v;
	spin_unlock(&atomic_ops_lock);
	return prev;
}

// AND v into *var under the lock; returns the previous value of *var
int atomic_and(int *var, int v)
{
	spin_lock(&atomic_ops_lock);
	const int prev = *var;
	*var = prev & v;
	spin_unlock(&atomic_ops_lock);
	return prev;
}

// OR v into *var under the lock; returns the previous value of *var
int atomic_or(int *var, int v)
{
	spin_lock(&atomic_ops_lock);
	const int prev = *var;
	*var = prev | v;
	spin_unlock(&atomic_ops_lock);
	return prev;
}
291 |
+ |
#endif |
292 |
+ |
|
293 |
+ |
|
294 |
|
/* |
295 |
|
* Main program |
296 |
|
*/ |
782 |
|
|
783 |
|
static void Quit(void) |
784 |
|
{ |
785 |
+ |
#if EMULATED_PPC |
786 |
+ |
// Exit PowerPC emulation |
787 |
+ |
exit_emul_ppc(); |
788 |
+ |
#endif |
789 |
+ |
|
790 |
|
// Stop 60Hz thread |
791 |
|
if (tick_thread_active) { |
792 |
|
pthread_cancel(tick_thread); |
890 |
|
*/ |
891 |
|
|
892 |
|
#if EMULATED_PPC |
848 |
– |
extern void emul_ppc(uint32 start); |
849 |
– |
extern void init_emul_ppc(void); |
893 |
|
void jump_to_rom(uint32 entry) |
894 |
|
{ |
895 |
|
init_emul_ppc(); |
1152 |
|
|
1153 |
|
/*
 *  Initialize *attr for a thread with the given priority, relative to the
 *  midpoint of the SCHED_FIFO priority range.  Realtime scheduling is only
 *  attempted when running as root, since it usually requires privileges;
 *  system contention scope is requested so the kernel schedules the thread.
 */
void Set_pthread_attr(pthread_attr_t *attr, int priority)
{
#ifdef HAVE_PTHREADS
	pthread_attr_init(attr);
#if defined(_POSIX_THREAD_PRIORITY_SCHEDULING)
	// Some of these only work for superuser
	if (geteuid() == 0) {
		pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
		pthread_attr_setschedpolicy(attr, SCHED_FIFO);
		int mid = (sched_get_priority_min(SCHED_FIFO) +
			   sched_get_priority_max(SCHED_FIFO)) / 2;
		struct sched_param fifo;
		fifo.sched_priority = mid + priority;
		pthread_attr_setschedparam(attr, &fifo);
	}
	if (pthread_attr_setscope(attr, PTHREAD_SCOPE_SYSTEM) != 0) {
#ifdef PTHREAD_SCOPE_BOUND_NP
		// If system scope is not available (eg. we're not running
		// with CAP_SCHED_MGT capability on an SGI box), try bound
		// scope.  It exposes pthread scheduling to the kernel,
		// without setting realtime priority.
		pthread_attr_setscope(attr, PTHREAD_SCOPE_BOUND_NP);
#endif
	}
#endif
#endif
}
1180 |
|
|
1181 |
|
|