root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.13
Committed: 2004-11-08T21:07:07Z by gbeauche
Branch: MAIN
Changes since 1.12: +132 -5 lines
Log Message:
Enable 33-bit memory addressing on 64-bit JIT-capable platforms (e.g. x86-64).
This is useful to get rid of address offset sign extensions. It uses POSIX
shared memory to create aliased regions, falling back to the usual
sign-extension approach if shm_open et al. don't work (e.g. no /dev/shm mounted).
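
For reference, here is a minimal sketch of that aliasing trick (not code from this revision: the segment name and error handling are simplified and hypothetical, and a 64-bit long is assumed, as in the file itself). The same POSIX shared-memory object is mapped twice, once at a base address and once 4 GB higher, so adding any unsigned 32-bit offset to the 33-bit base still lands in mapped memory and the JIT needs no sign-extension fixups:

/* Sketch only: simplified from vm_acquire() below. */
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static void *acquire_aliased(size_t size)
{
    const char *name = "/shm-alias-sketch";   /* hypothetical segment name */
    int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0644);
    if (fd < 0)
        return MAP_FAILED;
    if (ftruncate(fd, size) < 0)
        return MAP_FAILED;

    /* First mapping: let the kernel choose the base address. */
    void *base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED)
        return MAP_FAILED;

    /* Second mapping: alias the same object 4 GB higher, so that
       base + any unsigned 32-bit offset stays inside mapped memory. */
    if (mmap((char *)base + (1L << 32), size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
        return MAP_FAILED;

    close(fd);          /* both mappings remain valid after close() */
    shm_unlink(name);   /* and after the name is unlinked */
    return base;
}

On Linux this needs a mounted /dev/shm (hence the fallback mentioned above) and, with older glibc, linking against librt for shm_open/shm_unlink. The code below keeps the segment name around instead of unlinking immediately, so vm_release() can shm_unlink() it when the mapping goes away.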

File Contents

# User Rev Content
1 gbeauche 1.1 /*
2     * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3     * (supports mmap, vm_allocate or fallbacks to malloc)
4     *
5 cebix 1.12 * Basilisk II (C) 1997-2004 Christian Bauer
6 gbeauche 1.1 *
7     * This program is free software; you can redistribute it and/or modify
8     * it under the terms of the GNU General Public License as published by
9     * the Free Software Foundation; either version 2 of the License, or
10     * (at your option) any later version.
11     *
12     * This program is distributed in the hope that it will be useful,
13     * but WITHOUT ANY WARRANTY; without even the implied warranty of
14     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15     * GNU General Public License for more details.
16     *
17     * You should have received a copy of the GNU General Public License
18     * along with this program; if not, write to the Free Software
19     * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20     */
21    
22     #ifdef HAVE_CONFIG_H
23     #include "config.h"
24     #endif
25    
26 gbeauche 1.13 #ifdef HAVE_FCNTL_H
27     #include <fcntl.h>
28     #endif
29    
30 gbeauche 1.1 // TODO: Win32 VMs ?
31 gbeauche 1.13 #include <stdio.h>
32 gbeauche 1.1 #include <stdlib.h>
33     #include <string.h>
34 gbeauche 1.13 #include <limits.h>
35 gbeauche 1.1 #include "vm_alloc.h"
36    
37     #ifdef HAVE_MACH_VM
38     #ifndef HAVE_MACH_TASK_SELF
39     #ifdef HAVE_TASK_SELF
40     #define mach_task_self task_self
41     #else
42     #error "No task_self(), you lose."
43     #endif
44     #endif
45     #endif
46    
47 gbeauche 1.9 /* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
48     because the emulated target is 32-bit and this helps to allocate
49     memory so that branches can be resolved more easily (32-bit
50     displacement to code in .text), on AMD64 for example. */
51     #ifndef MAP_32BIT
52     #define MAP_32BIT 0
53     #endif
54 gbeauche 1.13 #ifndef MAP_ANON
55     #define MAP_ANON 0
56     #endif
57     #ifndef MAP_ANONYMOUS
58     #define MAP_ANONYMOUS 0
59     #endif
60 gbeauche 1.9
61     #define MAP_EXTRA_FLAGS (MAP_32BIT)
62    
63 gbeauche 1.1 #ifdef HAVE_MMAP_VM
64 gbeauche 1.9 #if defined(__linux__) && defined(__i386__)
65     /* Force a reasonable address below 0x80000000 on x86 so that we
66     don't get addresses above that limit when the program is run on AMD64.
67     NOTE: this is empirically determined on Linux/x86. */
68     #define MAP_BASE 0x10000000
69     #else
70     #define MAP_BASE 0x00000000
71     #endif
72     static char * next_address = (char *)MAP_BASE;
73 gbeauche 1.1 #ifdef HAVE_MMAP_ANON
74 gbeauche 1.10 #define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
75 gbeauche 1.1 #define zero_fd -1
76     #else
77     #ifdef HAVE_MMAP_ANONYMOUS
78 gbeauche 1.10 #define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
79 gbeauche 1.1 #define zero_fd -1
80     #else
81 gbeauche 1.10 #define map_flags (MAP_EXTRA_FLAGS)
82 gbeauche 1.1 static int zero_fd = -1;
83     #endif
84     #endif
85     #endif
86    
87 gbeauche 1.13 /* Utility functions for POSIX SHM handling. */
88    
89     #ifdef USE_33BIT_ADDRESSING
90     struct shm_range_t {
91     const char *file;
92     void *base;
93     unsigned int size;
94     shm_range_t *next;
95     };
96    
97     static shm_range_t *shm_ranges = NULL;
98    
99     static bool add_shm_range(const char *file, void *base, unsigned int size)
100     {
101     shm_range_t *r = (shm_range_t *)malloc(sizeof(shm_range_t));
102     if (r) {
103     r->file = file;
104     r->base = base;
105     r->size = size;
106     r->next = shm_ranges;
107     shm_ranges = r;
108     return true;
109     }
110     return false;
111     }
112    
113     static shm_range_t *find_shm_range(void *base, unsigned int size)
114     {
115     for (shm_range_t *r = shm_ranges; r != NULL; r = r->next)
116     if (r->base == base && r->size == size)
117     return r;
118     return NULL;
119     }
120    
121     static bool remove_shm_range(shm_range_t *r)
122     {
123     if (r) {
124     for (shm_range_t **p = &shm_ranges; *p != NULL; p = &(*p)->next) { // pointer-to-pointer also handles the list head
125     if (*p == r) {
126     *p = r->next;
127     free(r);
128     return true;
129     }
130     }
131     }
132     return false;
133     }
134    
135     static bool remove_shm_range(void *base, unsigned int size)
136     {
137     return remove_shm_range(find_shm_range(base, size));
138     }
139     #endif
140    
141     /* Build a POSIX SHM memory segment file descriptor name. */
142    
143     #ifdef USE_33BIT_ADDRESSING
144     static const char *build_shm_filename(void)
145     {
146     static int id = 0;
147     static char filename[PATH_MAX];
148    
149     int ret = snprintf(filename, sizeof(filename), "/BasiliskII-%d-shm-%d", getpid(), id);
150     if (ret < 0 || (size_t)ret >= sizeof(filename))
151     return NULL;
152    
153     id++;
154     return filename;
155     }
156     #endif
157    
158 gbeauche 1.10 /* Translate generic VM map flags to host values. */
159    
160     #ifdef HAVE_MMAP_VM
161     static int translate_map_flags(int vm_flags)
162     {
163     int flags = 0;
164     if (vm_flags & VM_MAP_SHARED)
165     flags |= MAP_SHARED;
166     if (vm_flags & VM_MAP_PRIVATE)
167     flags |= MAP_PRIVATE;
168     if (vm_flags & VM_MAP_FIXED)
169     flags |= MAP_FIXED;
170     if (vm_flags & VM_MAP_32BIT)
171     flags |= MAP_32BIT;
172     return flags;
173     }
174     #endif
175    
176 gbeauche 1.1 /* Initialize the VM system. Returns 0 if successful, -1 for errors. */
177    
178     int vm_init(void)
179     {
180     #ifdef HAVE_MMAP_VM
181     #ifndef zero_fd
182     zero_fd = open("/dev/zero", O_RDWR);
183     if (zero_fd < 0)
184     return -1;
185     #endif
186     #endif
187     return 0;
188     }
189    
190     /* Deallocate all internal data used to wrap virtual memory allocators. */
191    
192     void vm_exit(void)
193     {
194     #ifdef HAVE_MMAP_VM
195     #ifndef zero_fd
196     close(zero_fd);
197     #endif
198     #endif
199     }
200    
201     /* Allocate zero-filled memory of SIZE bytes. The mapping is private
202     and default protection bits are read / write. The return value
203     is the actual mapping address chosen or VM_MAP_FAILED for errors. */
204    
205 gbeauche 1.10 void * vm_acquire(size_t size, int options)
206 gbeauche 1.1 {
207     void * addr;
208 gbeauche 1.10
209     // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
210     if (options & VM_MAP_FIXED)
211     return VM_MAP_FAILED;
212    
213 gbeauche 1.1 #ifdef HAVE_MACH_VM
214     // vm_allocate() returns a zero-filled memory region
215     if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
216     return VM_MAP_FAILED;
217     #else
218     #ifdef HAVE_MMAP_VM
219 gbeauche 1.13 int fd = zero_fd;
220     int the_map_flags = translate_map_flags(options) | map_flags;
221    
222     #ifdef USE_33BIT_ADDRESSING
223     const char *shm_file = NULL;
224     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
225     the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
226     the_map_flags |= MAP_SHARED;
227    
228     if ((shm_file = build_shm_filename()) == NULL)
229     return VM_MAP_FAILED;
230    
231     if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
232     return VM_MAP_FAILED;
233    
234     if (ftruncate(fd, size) < 0)
235     return VM_MAP_FAILED;
236    
237     the_map_flags |= MAP_SHARED;
238     }
239     #endif
240 gbeauche 1.10
241 gbeauche 1.13 if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
242 gbeauche 1.1 return VM_MAP_FAILED;
243    
244 gbeauche 1.10 // Sanity checks for 64-bit platforms
245     if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
246     return VM_MAP_FAILED;
247    
248 gbeauche 1.3 next_address = (char *)addr + size;
249    
250 gbeauche 1.1 // Since I don't know the standard behavior of mmap(), zero-fill here
251     if (memset(addr, 0, size) != addr)
252     return VM_MAP_FAILED;
253 gbeauche 1.13
254     // Remap to 33-bit space
255     #ifdef USE_33BIT_ADDRESSING
256     if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
257     if (!add_shm_range(strdup(shm_file), addr, size))
258     return VM_MAP_FAILED;
259    
260     if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags | MAP_FIXED, fd, 0) == (void *)MAP_FAILED)
261     return VM_MAP_FAILED;
262     close(fd);
263     }
264     #endif
265 gbeauche 1.1 #else
266     if ((addr = calloc(size, 1)) == 0)
267     return VM_MAP_FAILED;
268    
269     // Omit changes for protections because they are not supported in this mode
270     return addr;
271     #endif
272     #endif
273 cebix 1.2
274 gbeauche 1.1 // Explicitly protect the newly mapped region here because on some systems,
275     // say MacOS X, mmap() doesn't honour the requested protection flags.
276     if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
277     return VM_MAP_FAILED;
278    
279     return addr;
280     }
281    
282     /* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
283     Returns 0 if successful, -1 on errors. */
284    
285 gbeauche 1.10 int vm_acquire_fixed(void * addr, size_t size, int options)
286 gbeauche 1.1 {
287 gbeauche 1.10 // Fixed mappings are required to be private
288     if (options & VM_MAP_SHARED)
289     return -1;
290    
291 gbeauche 1.1 #ifdef HAVE_MACH_VM
292     // vm_allocate() returns a zero-filled memory region
293     if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
294     return -1;
295     #else
296     #ifdef HAVE_MMAP_VM
297 gbeauche 1.10 const int extra_map_flags = translate_map_flags(options);
298    
299 gbeauche 1.11 if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, extra_map_flags | map_flags | MAP_FIXED, zero_fd, 0) == (void *)MAP_FAILED)
300 gbeauche 1.1 return -1;
301    
302     // Since I don't know the standard behavior of mmap(), zero-fill here
303 gbeauche 1.8 if (memset(addr, 0, size) != addr)
304 gbeauche 1.1 return -1;
305     #else
306     // Unsupported
307     return -1;
308     #endif
309     #endif
310    
311     // Explicitly protect the newly mapped region here because on some systems,
312     // say MacOS X, mmap() doesn't honour the requested protection flags.
313 gbeauche 1.6 if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
314 gbeauche 1.1 return -1;
315    
316     return 0;
317     }
318    
319     /* Deallocate any mapping for the region starting at ADDR and extending
320     LEN bytes. Returns 0 if successful, -1 on errors. */
321    
322     int vm_release(void * addr, size_t size)
323     {
324 gbeauche 1.3 // Safety check: don't try to release memory that was not allocated
325     if (addr == VM_MAP_FAILED)
326     return 0;
327    
328 gbeauche 1.1 #ifdef HAVE_MACH_VM
329 gbeauche 1.4 if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
330     return -1;
331 gbeauche 1.1 #else
332     #ifdef HAVE_MMAP_VM
333 gbeauche 1.7 if (munmap((caddr_t)addr, size) != 0)
334 gbeauche 1.4 return -1;
335 gbeauche 1.13
336     #ifdef USE_33BIT_ADDRESSING
337     shm_range_t *r = find_shm_range(addr, size);
338     if (r) {
339     if (munmap((char *)r->base + (1L << 32), size) != 0)
340     return -1;
341    
342     if (shm_unlink(r->file) < 0)
343     return -1;
344     free((char *)r->file);
345    
346     if (!remove_shm_range(r))
347     return -1;
348     }
349     #endif
350 gbeauche 1.1 #else
351     free(addr);
352     #endif
353     #endif
354 gbeauche 1.4
355     return 0;
356 gbeauche 1.1 }
357    
358     /* Change the memory protection of the region starting at ADDR and
359     extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */
360    
361     int vm_protect(void * addr, size_t size, int prot)
362     {
363     #ifdef HAVE_MACH_VM
364     int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
365     return ret_code == KERN_SUCCESS ? 0 : -1;
366     #else
367     #ifdef HAVE_MMAP_VM
368 gbeauche 1.7 int ret_code = mprotect((caddr_t)addr, size, prot);
369 gbeauche 1.1 return ret_code == 0 ? 0 : -1;
370     #else
371     // Unsupported
372     return -1;
373     #endif
374     #endif
375     }
376    
377     #ifdef CONFIGURE_TEST_VM_MAP
378     /* Tests covered here:
379     - TEST_VM_PROT_* program slices actually succeed when a crash occurs
380     - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
381     */
382     int main(void)
383     {
384     vm_init();
385    
386     #define page_align(address) ((char *)((unsigned long)(address) & -page_size))
387     unsigned long page_size = getpagesize();
388    
389     const int area_size = 6 * page_size;
390     volatile char * area = (volatile char *) vm_acquire(area_size);
391     volatile char * fault_address = area + (page_size * 7) / 2;
392    
393     #if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
394     if (area == VM_MAP_FAILED)
395     return 1;
396    
397     if (vm_release((char *)area, area_size) < 0)
398     return 1;
399    
400     return 0;
401     #endif
402    
403     #if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
404     if (area == VM_MAP_FAILED)
405     return 0;
406    
407     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
408     return 0;
409     #endif
410    
411     #if defined(TEST_VM_PROT_RDWR_WRITE)
412     if (area == VM_MAP_FAILED)
413     return 1;
414    
415     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
416     return 1;
417    
418     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
419     return 1;
420     #endif
421    
422     #if defined(TEST_VM_PROT_READ_WRITE)
423     if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
424     return 0;
425     #endif
426    
427     #if defined(TEST_VM_PROT_NONE_READ)
428     // this should cause a core dump
429     char foo = *fault_address;
430     return 0;
431     #endif
432    
433     #if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
434     // this should cause a core dump
435     *fault_address = 'z';
436     return 0;
437     #endif
438    
439     #if defined(TEST_VM_PROT_RDWR_WRITE)
440     // this should not cause a core dump
441     *fault_address = 'z';
442     return 0;
443     #endif
444     }
445     #endif