root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.33
Committed: 2011-12-30T20:37:30Z by asvitkine
Branch: MAIN
Changes since 1.32: +19 -0 lines
Log Message:
Fix CrashReporter popping up when PPC is not emulated on pre-10.5 systems.

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate or fallbacks to malloc)
 *
 *  Basilisk II (C) 1997-2008 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#if defined(__APPLE__) && defined(__MACH__)
#include <sys/utsname.h>
#endif

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_WIN32_VM
/* Windows is either ILP32 or LLP64 */
typedef UINT_PTR vm_uintptr_t;
#else
/* Other systems are sane as they are either ILP32 or LP64 */
typedef unsigned long vm_uintptr_t;
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit, and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#if defined(__hpux)
#define MAP_32BIT MAP_ADDR32
#endif
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

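/* Background note (added for clarity, not from the original source): on
   x86-64, direct CALL/JMP instructions take a signed 32-bit displacement,
   so generated code can only branch straight into the executable's .text
   section if the buffer lies within +/-2GB of it. MAP_32BIT asks the
   kernel to place the mapping in the low 2GB of the address space, which
   satisfies that constraint on typical Linux layouts. */
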
#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE	0x10000000
#else
#define MAP_BASE	0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags	(MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd		-1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags	(MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd		-1
#else
#define map_flags	(MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
	int flags = 0;
	if (vm_flags & VM_MAP_SHARED)
		flags |= MAP_SHARED;
	if (vm_flags & VM_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (vm_flags & VM_MAP_FIXED)
		flags |= MAP_FIXED;
	if (vm_flags & VM_MAP_32BIT)
		flags |= MAP_32BIT;
	return flags;
}
#endif
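
/* Illustration (added, not part of the original source): a request such as
   translate_map_flags(VM_MAP_PRIVATE | VM_MAP_32BIT) yields
   (MAP_PRIVATE | MAP_32BIT); callers then OR in the platform's map_flags
   before handing the result to mmap(). On hosts without MAP_32BIT the
   macro is defined to 0 above, so the bit drops out harmlessly. */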

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
	return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
	return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
}
#endif
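
/* Worked example (added for clarity, not in the original source): with
   addr = 0x0001A000 and size = 0x1000, align_addr_segment() masks off the
   low 16 bits and returns 0x00010000, while align_size_segment() grows the
   size by the 0xA000 bytes of offset to 0xB000, so the aligned region
   still covers the whole of [addr, addr + size). */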

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
	int prot = PAGE_READWRITE;
	if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_EXECUTE_READWRITE;
	else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
		prot = PAGE_EXECUTE_READ;
	else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_READWRITE;
	else if (prot_flags == VM_PAGE_READ)
		prot = PAGE_READONLY;
	else if (prot_flags == 0)
		prot = PAGE_NOACCESS;
	return prot;
}
#endif

/* Translate Mach return codes to POSIX errno values. */
#ifdef HAVE_MACH_VM
static int vm_error(kern_return_t ret_code)
{
	switch (ret_code) {
		case KERN_SUCCESS:
			return 0;
		case KERN_INVALID_ADDRESS:
		case KERN_NO_SPACE:
			return ENOMEM;
		case KERN_PROTECTION_FAILURE:
			return EACCES;
		default:
			return EINVAL;
	}
}
#endif
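
/* Usage note (added, not in the original source): the allocation paths
   below do errno = vm_error(ret_code); after a failing Mach call so the
   error surfaces as a familiar POSIX code, e.g. KERN_NO_SPACE becomes
   ENOMEM and KERN_PROTECTION_FAILURE becomes EACCES. */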

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif

	// On 10.4 and earlier, reset CrashReporter's task signal handler to
	// avoid having it show up for signals that get handled.
#if defined(__APPLE__) && defined(__MACH__)
	struct utsname info;

	// Darwin release 8 corresponds to Mac OS X 10.4 (Tiger)
	if (!uname(&info) && atoi(info.release) <= 8) {
		task_set_exception_ports(mach_task_self(),
			EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
			MACH_PORT_NULL,
			EXCEPTION_STATE_IDENTITY,
			MACHINE_THREAD_STATE);
	}
#endif

	return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
	void * addr;

	errno = 0;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return VM_MAP_FAILED;
#endif

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return VM_MAP_FAILED;
	}
#else
#ifdef HAVE_MMAP_VM
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;

	if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;
#else
#ifdef HAVE_WIN32_VM
	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}
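
/* Example usage (added for illustration, not part of the original source):

     void *ram = vm_acquire(ram_size, VM_MAP_DEFAULT);
     if (ram == VM_MAP_FAILED) {
         perror("vm_acquire");   // errno is set on the Mach and mmap paths
         return -1;
     }
     ...
     vm_release(ram, ram_size);

   VM_MAP_DEFAULT comes from vm_alloc.h; ram_size is hypothetical. */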

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
	errno = 0;

	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return -1;
#endif

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return -1;
	}
#else
#ifdef HAVE_MMAP_VM
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return -1;

	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	// Allocate a possibly offset region to align on 64K boundaries
	LPVOID req_addr = align_addr_segment(addr);
	DWORD req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}
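
/* Sketch (added for illustration, not from the original source): placing
   the emulated machine's RAM at a fixed guest-visible address, assuming a
   hypothetical page-aligned constant RAM_BASE:

     if (vm_acquire_fixed((void *)RAM_BASE, ram_size, VM_MAP_DEFAULT) < 0) {
         fprintf(stderr, "Cannot map RAM at %p\n", (void *)RAM_BASE);
         return -1;
     }
*/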

/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}
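
/* Example (added for illustration, not part of the original source): a JIT
   would typically emit into a read/write page, then flip it to
   read + execute before running the generated code:

     if (vm_protect(code_page, page_size, VM_PAGE_READ | VM_PAGE_EXECUTE) < 0)
         abort();   // a page left writable and executable would be unsafe
*/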

/* Return the addresses of the pages that got modified in the
   specified range [ ADDR, ADDR + SIZE [ since the last reset of the
   watch bits. Returns 0 if successful, -1 for errors. */

int vm_get_write_watch(void * addr, size_t size,
					   void ** pages, unsigned int * n_pages,
					   int options)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	DWORD flags = 0;
	if (options & VM_WRITE_WATCH_RESET)
		flags |= WRITE_WATCH_FLAG_RESET;

	ULONG page_size;
	ULONG_PTR count = *n_pages;
	int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
	if (ret_code != 0)
		return -1;

	*n_pages = count;
	return 0;
#endif
#endif
	// Unsupported
	return -1;
}
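
/* Usage sketch (added, not in the original source): N_PAGES is in/out --
   pass the capacity of the PAGES array in and read the count back out:

     void *dirty[MAX_PAGES];                  // MAX_PAGES is hypothetical
     unsigned int n = MAX_PAGES;
     if (vm_get_write_watch(base, size, dirty, &n, VM_WRITE_WATCH_RESET) == 0) {
         // dirty[0 .. n-1] hold the addresses of pages written since the
         // last reset; passing VM_WRITE_WATCH_RESET also re-arms the watch
     }
*/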

/* Reset the write-tracking state for the specified range [ ADDR, ADDR
   + SIZE [. Returns 0 if successful, -1 for errors. */

int vm_reset_write_watch(void * addr, size_t size)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	int ret_code = ResetWriteWatch(addr, size);
	return ret_code == 0 ? 0 : -1;
#endif
#endif
	// Unsupported
	return -1;
}

/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
	static vm_uintptr_t page_size = 0;
	if (page_size == 0) {
		SYSTEM_INFO si;
		GetSystemInfo(&si);
		page_size = si.dwAllocationGranularity;
	}
	return page_size;
#else
	return getpagesize();
#endif
}
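
/* Note (added for clarity, not from the original source): on Windows this
   reports the 64K allocation granularity rather than the 4K hardware page
   size, presumably to match the 64K alignment used by the VirtualAlloc()
   code paths above. A typical use is page-aligning an arbitrary address:

     vm_uintptr_t page_size = vm_get_page_size();
     char *page = (char *)((vm_uintptr_t)ptr & -(vm_uintptr_t)page_size);
*/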

#ifdef CONFIGURE_TEST_VM_WRITE_WATCH
int main(void)
{
	int i, j;

	vm_init();

	vm_uintptr_t page_size = vm_get_page_size();

	char *area;
	const int n_pages = 7;
	const int area_size = n_pages * page_size;
	const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
	if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
		return 1;

	unsigned int n_modified_pages_expected = 0;
	static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
	for (i = 0; i < n_pages; i++) {
		if (touch_page[i]) {
			area[i * page_size] = 1;
			++n_modified_pages_expected;
		}
	}

	char *modified_pages[n_pages];
	unsigned int n_modified_pages = n_pages;
	if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages) < 0)
		return 2;
	if (n_modified_pages != n_modified_pages_expected)
		return 3;
	for (i = 0, j = 0; i < n_pages; i++) {
		char v = area[i * page_size];
		if ((touch_page[i] && !v) || (!touch_page[i] && v))
			return 4;
		if (!touch_page[i])
			continue;
		if (modified_pages[j] != (area + i * page_size))
			return 5;
		++j;
	}

	vm_release(area, area_size);
	return 0;
}
#endif

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
	exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they could be compiled
*/
int main(void)
{
	vm_init();

	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
	vm_uintptr_t page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
#endif