ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.21
Committed: 2006-02-26T23:45:07Z (18 years, 6 months ago) by gbeauche
Branch: MAIN
Changes since 1.20: +32 -4 lines
Log Message:
add 33-bit addressing support to vm_acquire_fixed()

File Contents

# User Rev Content
1 gbeauche 1.1 /*
2     * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3     * (supports mmap, vm_allocate or fallbacks to malloc)
4     *
5 gbeauche 1.17 * Basilisk II (C) 1997-2005 Christian Bauer
6 gbeauche 1.1 *
7     * This program is free software; you can redistribute it and/or modify
8     * it under the terms of the GNU General Public License as published by
9     * the Free Software Foundation; either version 2 of the License, or
10     * (at your option) any later version.
11     *
12     * This program is distributed in the hope that it will be useful,
13     * but WITHOUT ANY WARRANTY; without even the implied warranty of
14     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15     * GNU General Public License for more details.
16     *
17     * You should have received a copy of the GNU General Public License
18     * along with this program; if not, write to the Free Software
19     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20     */
21    
22     #ifdef HAVE_CONFIG_H
23     #include "config.h"
24     #endif
25    
26 gbeauche 1.13 #ifdef HAVE_FCNTL_H
27     #include <fcntl.h>
28     #endif
29    
30 gbeauche 1.14 #ifdef HAVE_WIN32_VM
31     #define WIN32_LEAN_AND_MEAN /* avoid including junk */
32     #include <windows.h>
33     #endif
34    
35 gbeauche 1.13 #include <stdio.h>
36 gbeauche 1.1 #include <stdlib.h>
37     #include <string.h>
38 gbeauche 1.13 #include <limits.h>
39 gbeauche 1.1 #include "vm_alloc.h"
40    
41     #ifdef HAVE_MACH_VM
42     #ifndef HAVE_MACH_TASK_SELF
43     #ifdef HAVE_TASK_SELF
44     #define mach_task_self task_self
45     #else
46     #error "No task_self(), you lose."
47     #endif
48     #endif
49     #endif
50    
51 gbeauche 1.9 /* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
52     because the emulated target is 32-bit and this helps to allocate
53     memory so that branches could be resolved more easily (32-bit
54     displacement to code in .text), on AMD64 for example. */
55     #ifndef MAP_32BIT
56     #define MAP_32BIT 0
57     #endif
58 gbeauche 1.13 #ifndef MAP_ANON
59     #define MAP_ANON 0
60     #endif
61     #ifndef MAP_ANONYMOUS
62     #define MAP_ANONYMOUS 0
63     #endif
64 gbeauche 1.9
65     #define MAP_EXTRA_FLAGS (MAP_32BIT)
66    
67 gbeauche 1.1 #ifdef HAVE_MMAP_VM
68 gbeauche 1.9 #if defined(__linux__) && defined(__i386__)
69     /* Force a reasonable address below 0x80000000 on x86 so that we
70     don't get addresses above when the program is run on AMD64.
71     NOTE: this is empirically determined on Linux/x86. */
72     #define MAP_BASE 0x10000000
73     #else
74     #define MAP_BASE 0x00000000
75     #endif
76     static char * next_address = (char *)MAP_BASE;
77 gbeauche 1.1 #ifdef HAVE_MMAP_ANON
78 gbeauche 1.10 #define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
79 gbeauche 1.1 #define zero_fd -1
80     #else
81     #ifdef HAVE_MMAP_ANONYMOUS
82 gbeauche 1.10 #define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
83 gbeauche 1.1 #define zero_fd -1
84     #else
85 gbeauche 1.10 #define map_flags (MAP_EXTRA_FLAGS)
86 gbeauche 1.1 static int zero_fd = -1;
87     #endif
88     #endif
89     #endif
90    
91 gbeauche 1.13 /* Utility functions for POSIX SHM handling. */
92    
93     #ifdef USE_33BIT_ADDRESSING
/* Record of one POSIX SHM-backed mapping, kept so that vm_release() can
   also unmap the 33-bit mirror mapping and unlink the SHM object. */
struct shm_range_t {
	const char *file;	// SHM object name (strdup'ed in vm_acquire, freed in vm_release)
	void *base;		// base address of the primary mapping
	unsigned int size;	// size of the mapping in bytes
	shm_range_t *next;	// singly-linked list link
};

// Head of the global list of live SHM-backed ranges
static shm_range_t *shm_ranges = NULL;
102    
103     static bool add_shm_range(const char *file, void *base, unsigned int size)
104     {
105     shm_range_t *r = (shm_range_t *)malloc(sizeof(shm_range_t));
106     if (r) {
107     r->file = file;
108     r->base = base;
109     r->size = size;
110     r->next = shm_ranges ? shm_ranges : NULL;
111     shm_ranges = r;
112     return true;
113     }
114     return false;
115     }
116    
117     static shm_range_t *find_shm_range(void *base, unsigned int size)
118     {
119     for (shm_range_t *r = shm_ranges; r != NULL; r = r->next)
120     if (r->base == base && r->size == size)
121     return r;
122     return NULL;
123     }
124    
125     static bool remove_shm_range(shm_range_t *r)
126     {
127     if (r) {
128     for (shm_range_t *p = shm_ranges; p != NULL; p = p->next) {
129     if (p->next == r) {
130     p->next = r->next;
131     free(r);
132     return true;
133     }
134     }
135     }
136     return false;
137     }
138    
139     static bool remove_shm_range(void *base, unsigned int size)
140     {
141     remove_shm_range(find_shm_range(base, size));
142     }
143     #endif
144    
145     /* Build a POSIX SHM memory segment file descriptor name. */
146    
147     #ifdef USE_33BIT_ADDRESSING
/* Build a unique POSIX SHM object name of the form
   "/BasiliskII-<pid>-shm-<counter>".  Returns a pointer to a static
   buffer (overwritten on each call), or NULL on formatting failure. */
static const char *build_shm_filename(void)
{
	static int id = 0;
	static char filename[PATH_MAX];

	int n = snprintf(filename, sizeof(filename), "/BasiliskII-%d-shm-%d", getpid(), id);
	if (n < 0 || (size_t)n >= sizeof(filename))
		return NULL;	// encoding error or name would be truncated

	id++;
	return filename;
}
160     #endif
161    
162 gbeauche 1.10 /* Translate generic VM map flags to host values. */
163    
164     #ifdef HAVE_MMAP_VM
165     static int translate_map_flags(int vm_flags)
166     {
167     int flags = 0;
168     if (vm_flags & VM_MAP_SHARED)
169     flags |= MAP_SHARED;
170     if (vm_flags & VM_MAP_PRIVATE)
171     flags |= MAP_PRIVATE;
172     if (vm_flags & VM_MAP_FIXED)
173     flags |= MAP_FIXED;
174     if (vm_flags & VM_MAP_32BIT)
175     flags |= MAP_32BIT;
176     return flags;
177     }
178     #endif
179    
180 gbeauche 1.14 /* Align ADDR and SIZE to 64K boundaries. */
181    
182     #ifdef HAVE_WIN32_VM
// Round ADDR down to a 64 KB boundary (the VirtualAlloc allocation
// granularity).  NOTE(review): the DWORD cast truncates pointers on
// 64-bit Windows -- presumably this code targets 32-bit hosts only;
// verify before building for Win64.
static inline LPVOID align_addr_segment(LPVOID addr)
{
	return (LPVOID)(((DWORD)addr) & -65536);
}
187    
// Grow SIZE by however much ADDR was rounded down, so that the aligned
// region still covers the whole of [addr, addr + size).
static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
	return size + ((DWORD)addr - (DWORD)align_addr_segment(addr));
}
192     #endif
193    
194     /* Translate generic VM prot flags to host values. */
195    
196     #ifdef HAVE_WIN32_VM
197     static int translate_prot_flags(int prot_flags)
198     {
199     int prot = PAGE_READWRITE;
200     if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
201     prot = PAGE_EXECUTE_READWRITE;
202     else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
203     prot = PAGE_EXECUTE_READ;
204     else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
205     prot = PAGE_READWRITE;
206     else if (prot_flags == VM_PAGE_READ)
207     prot = PAGE_READONLY;
208     else if (prot_flags == 0)
209     prot = PAGE_NOACCESS;
210     return prot;
211     }
212     #endif
213    
214 gbeauche 1.1 /* Initialize the VM system. Returns 0 if successful, -1 for errors. */
215    
int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// zero_fd is a real variable (not the macro constant -1) only when
	// neither MAP_ANON nor MAP_ANONYMOUS exists; mmap() then needs a
	// /dev/zero descriptor to obtain zero-filled pages.
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}
227    
228     /* Deallocate all internal data used to wrap virtual memory allocators. */
229    
void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	// Close /dev/zero only if vm_init() actually opened it (when
	// zero_fd is a macro, anonymous mappings are in use and there is
	// nothing to release).
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}
241    
242     /* Allocate zero-filled memory of SIZE bytes. The mapping is private
243     and default protection bits are read / write. The return value
244     is the actual mapping address chosen or VM_MAP_FAILED for errors. */
245    
246 gbeauche 1.10 void * vm_acquire(size_t size, int options)
247 gbeauche 1.1 {
248     void * addr;
249 gbeauche 1.10
250     // VM_MAP_FIXED are to be used with vm_acquire_fixed() only
251     if (options & VM_MAP_FIXED)
252     return VM_MAP_FAILED;
253    
254 gbeauche 1.1 #ifdef HAVE_MACH_VM
255     // vm_allocate() returns a zero-filled memory region
256     if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
257     return VM_MAP_FAILED;
258     #else
259     #ifdef HAVE_MMAP_VM
260 gbeauche 1.13 int fd = zero_fd;
261     int the_map_flags = translate_map_flags(options) | map_flags;
262    
263     #ifdef USE_33BIT_ADDRESSING
264     const char *shm_file = NULL;
265     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
266     the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
267     the_map_flags |= MAP_SHARED;
268    
269     if ((shm_file = build_shm_filename()) == NULL)
270     return VM_MAP_FAILED;
271    
272     if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
273     return VM_MAP_FAILED;
274    
275     if (ftruncate(fd, size) < 0)
276     return VM_MAP_FAILED;
277     }
278     #endif
279 gbeauche 1.10
280 gbeauche 1.13 if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
281 gbeauche 1.1 return VM_MAP_FAILED;
282    
283 gbeauche 1.10 // Sanity checks for 64-bit platforms
284     if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
285     return VM_MAP_FAILED;
286    
287 gbeauche 1.3 next_address = (char *)addr + size;
288    
289 gbeauche 1.1 // Since I don't know the standard behavior of mmap(), zero-fill here
290     if (memset(addr, 0, size) != addr)
291     return VM_MAP_FAILED;
292 gbeauche 1.13
293     // Remap to 33-bit space
294     #ifdef USE_33BIT_ADDRESSING
295     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
296     if (!add_shm_range(strdup(shm_file), addr, size))
297     return VM_MAP_FAILED;
298    
299     if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags | MAP_FIXED, fd, 0) == (void *)MAP_FAILED)
300     return VM_MAP_FAILED;
301     close(fd);
302     }
303     #endif
304 gbeauche 1.1 #else
305 gbeauche 1.14 #ifdef HAVE_WIN32_VM
306     if ((addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE)) == NULL)
307     return VM_MAP_FAILED;
308    
309     // Zero newly allocated memory
310     if (memset(addr, 0, size) != addr)
311     return VM_MAP_FAILED;
312     #else
313 gbeauche 1.1 if ((addr = calloc(size, 1)) == 0)
314     return VM_MAP_FAILED;
315    
316     // Omit changes for protections because they are not supported in this mode
317     return addr;
318     #endif
319     #endif
320 gbeauche 1.14 #endif
321 cebix 1.2
322 gbeauche 1.1 // Explicitely protect the newly mapped region here because on some systems,
323     // say MacOS X, mmap() doesn't honour the requested protection flags.
324     if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
325     return VM_MAP_FAILED;
326    
327     return addr;
328     }
329    
330     /* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
331     Returns 0 if successful, -1 on errors. */
332    
333 gbeauche 1.10 int vm_acquire_fixed(void * addr, size_t size, int options)
334 gbeauche 1.1 {
335 gbeauche 1.10 // Fixed mappings are required to be private
336     if (options & VM_MAP_SHARED)
337     return -1;
338    
339 gbeauche 1.1 #ifdef HAVE_MACH_VM
340     // vm_allocate() returns a zero-filled memory region
341     if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
342     return -1;
343     #else
344     #ifdef HAVE_MMAP_VM
345 gbeauche 1.21 int fd = zero_fd;
346     int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;
347 gbeauche 1.10
348 gbeauche 1.21 #ifdef USE_33BIT_ADDRESSING
349     const char *shm_file = NULL;
350     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
351     the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
352     the_map_flags |= MAP_SHARED;
353    
354     if ((shm_file = build_shm_filename()) == NULL)
355     return -1;
356    
357     if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
358     return -1;
359    
360     if (ftruncate(fd, size) < 0)
361     return -1;
362     }
363     #endif
364    
365     if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
366 gbeauche 1.1 return -1;
367    
368     // Since I don't know the standard behavior of mmap(), zero-fill here
369 gbeauche 1.8 if (memset(addr, 0, size) != addr)
370 gbeauche 1.1 return -1;
371 gbeauche 1.21
372     // Remap to 33-bit space
373     #ifdef USE_33BIT_ADDRESSING
374     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
375     if (!add_shm_range(strdup(shm_file), addr, size))
376     return -1;
377    
378     if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
379     return -1;
380     close(fd);
381     }
382     #endif
383 gbeauche 1.1 #else
384 gbeauche 1.14 #ifdef HAVE_WIN32_VM
385     // Windows cannot allocate Low Memory
386     if (addr == NULL)
387     return -1;
388    
389     // Allocate a possibly offset region to align on 64K boundaries
390     LPVOID req_addr = align_addr_segment(addr);
391     DWORD req_size = align_size_segment(addr, size);
392     LPVOID ret_addr = VirtualAlloc(req_addr, req_size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
393     if (ret_addr != req_addr)
394     return -1;
395    
396     // Zero newly allocated memory
397     if (memset(addr, 0, size) != addr)
398     return -1;
399     #else
400 gbeauche 1.1 // Unsupported
401     return -1;
402     #endif
403     #endif
404 gbeauche 1.14 #endif
405 gbeauche 1.1
406     // Explicitely protect the newly mapped region here because on some systems,
407     // say MacOS X, mmap() doesn't honour the requested protection flags.
408 gbeauche 1.6 if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
409 gbeauche 1.1 return -1;
410    
411     return 0;
412     }
413    
414     /* Deallocate any mapping for the region starting at ADDR and extending
415     LEN bytes. Returns 0 if successful, -1 on errors. */
416    
int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;

#ifdef USE_33BIT_ADDRESSING
	// If this range was mirrored into 33-bit space, also unmap the
	// mirror at ADDR + 4 GB and dispose of the backing SHM object.
	// NOTE: SIZE must match the value passed to vm_acquire() exactly,
	// since find_shm_range() compares both base and size.
	shm_range_t *r = find_shm_range(addr, size);
	if (r) {
		if (munmap((char *)r->base + (1L << 32), size) != 0)
			return -1;

		if (shm_unlink(r->file) < 0)
			return -1;
		free((char *)r->file);	// was strdup'ed in vm_acquire()

		if (!remove_shm_range(r))
			return -1;
	}
#endif
#else
#ifdef HAVE_WIN32_VM
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}
457    
458     /* Change the memory protection of the region starting at ADDR and
459     extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */
460    
int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	// NOTE: this resolves to the Mach kernel vm_protect() (5-argument
	// form), not a recursive call -- C++ overload resolution by arity.
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	// VM_PAGE_* flags are assumed to match the host PROT_* values here
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}
482    
483 gbeauche 1.15 /* Returns the size of a page. */
484    
/* Returns the size of a page. */
int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
	// Computed once and cached across calls.
	// NOTE(review): this returns the allocation granularity (usually
	// 64 KB), not dwPageSize -- presumably intentional so callers round
	// to VirtualAlloc boundaries; confirm against call sites.
	static unsigned long cached_size = 0;
	if (cached_size == 0) {
		SYSTEM_INFO info;
		GetSystemInfo(&info);
		cached_size = info.dwAllocationGranularity;
	}
	return cached_size;
#else
	return getpagesize();
#endif
}
499    
500 gbeauche 1.1 #ifdef CONFIGURE_TEST_VM_MAP
501 gbeauche 1.18 #include <stdlib.h>
502     #include <signal.h>
503    
// Signal handler for the configure-time tests below: terminate with a
// nonzero exit code so an expected access violation is distinguishable
// from a clean run.
static void fault_handler(int sig)
{
	exit(1);
}
508    
509 gbeauche 1.1 /* Tests covered here:
510     - TEST_VM_PROT_* program slices actually succeeds when a crash occurs
511     - TEST_VM_MAP_ANON* program slices succeeds when it could be compiled
512     */
int main(void)
{
	vm_init();

	// Expected faults must exit(1) instead of dumping core
	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS, fault_handler);
#endif

// Round ADDRESS down to the start of its page
#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
	unsigned long page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	// NOTE(review): single-argument call -- presumably `options' has a
	// default value in vm_alloc.h; verify against the header.
	volatile char * area = (volatile char *) vm_acquire(area_size);
	// Probe address in the middle of the area's fourth page
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	// Anonymous-mapping test: success means acquire + release both work
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	// Returning 0 here means "setup failed, skip" -- the real signal is
	// whether the access below faults
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
581     #endif