root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.12
Committed: 2004-01-12T15:29:25Z by cebix
Branch: MAIN
CVS Tags: nigel-build-16, nigel-build-15
Changes since 1.11: +1 -1 lines
Log Message:
Happy New Year! :)

File Contents

/*
 * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                (supports mmap, vm_allocate, or falls back to malloc)
 *
 * Basilisk II (C) 1997-2004 Christian Bauer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

// TODO: Win32 VMs ?
#include <stdlib.h>
#include <string.h>
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if defined(__linux__) && defined(__i386__)
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above 0x80000000 when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
	int flags = 0;
	if (vm_flags & VM_MAP_SHARED)
		flags |= MAP_SHARED;
	if (vm_flags & VM_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (vm_flags & VM_MAP_FIXED)
		flags |= MAP_FIXED;
	if (vm_flags & VM_MAP_32BIT)
		flags |= MAP_32BIT;
	return flags;
}
#endif

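/* Usage sketch (illustrative only, compiled out): the generic VM_MAP_* bits
   combine like their host MAP_* counterparts, so a private 32-bit request
   becomes (MAP_PRIVATE | MAP_32BIT) -- or plain MAP_PRIVATE on hosts where
   MAP_32BIT was defined to 0 above. */
#if 0
static void example_translate_flags(void)	// hypothetical helper
{
	int flags = translate_map_flags(VM_MAP_PRIVATE | VM_MAP_32BIT);
	// On Linux/AMD64: flags == (MAP_PRIVATE | MAP_32BIT)
	// Elsewhere:      flags == MAP_PRIVATE (MAP_32BIT folds to 0)
}
#endif
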
/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	close(zero_fd);
#endif
#endif
}

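/* Lifecycle sketch (illustrative only, compiled out): vm_init() should be
   called once before the first allocation -- on hosts without anonymous
   mmap it opens /dev/zero -- and vm_exit() releases that descriptor. */
#if 0
static int example_vm_lifecycle(void)	// hypothetical helper
{
	if (vm_init() < 0)
		return -1;
	// ... vm_acquire() / vm_release() calls go here ...
	vm_exit();
	return 0;
}
#endif
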
/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
	void * addr;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
		return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
	const int extra_map_flags = translate_map_flags(options);

	if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, extra_map_flags | map_flags, zero_fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;

	// Since I don't know the standard behavior of mmap(), zero-fill here
	if (memset(addr, 0, size) != addr)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}

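/* Usage sketch (illustrative only, compiled out): callers test the result
   against VM_MAP_FAILED, not NULL. VM_MAP_32BIT is a hint to place the
   region below 4GB; on 64-bit hosts the request fails if the hint cannot
   be honoured. */
#if 0
static void *example_acquire(size_t size)	// hypothetical helper
{
	void *mem = vm_acquire(size, VM_MAP_PRIVATE | VM_MAP_32BIT);
	if (mem == VM_MAP_FAILED)
		return VM_MAP_FAILED;
	// mem is zero-filled with read/write protection (VM_PAGE_DEFAULT)
	return mem;
}
#endif
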
/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	const int extra_map_flags = translate_map_flags(options);

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, extra_map_flags | map_flags | MAP_FIXED, zero_fd, 0) == (void *)MAP_FAILED)
		return -1;

	// Since I don't know the standard behavior of mmap(), zero-fill here
	if (memset(addr, 0, size) != addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}

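/* Usage sketch (illustrative only, compiled out): the base address below is
   a hypothetical, page-aligned value; real callers derive it from the
   emulated memory layout. Note that shared mappings are rejected here. */
#if 0
static int example_acquire_fixed(size_t size)	// hypothetical helper
{
	void *base = (void *)0x30000000;	// hypothetical page-aligned address
	if (vm_acquire_fixed(base, size, VM_MAP_PRIVATE) < 0)
		return -1;
	return 0;
}
#endif
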
/* Deallocate any mapping for the region starting at ADDR and extending
   SIZE bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;
#else
	free(addr);
#endif
#endif

	return 0;
}

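/* Usage sketch (illustrative only, compiled out): because vm_release()
   treats the VM_MAP_FAILED sentinel as a no-op, an acquire result can be
   released on every path without a separate success check. */
#if 0
static void example_release(void)	// hypothetical helper
{
	void *mem = vm_acquire(0x10000, VM_MAP_PRIVATE);
	vm_release(mem, 0x10000);	// safe even if mem == VM_MAP_FAILED
}
#endif
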
/* Change the memory protection of the region starting at ADDR and
   extending SIZE bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
}

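/* Usage sketch (illustrative only, compiled out): revoking and restoring
   access to a single page, the same pattern the CONFIGURE_TEST_VM_MAP
   harness below exercises to provoke and survive protection faults. */
#if 0
static int example_guard_page(char *page)	// hypothetical helper; page must be page-aligned
{
	unsigned long page_size = getpagesize();
	if (vm_protect(page, page_size, VM_PAGE_NOACCESS) < 0)
		return -1;
	// ... any access to [page, page + page_size) now faults ...
	return vm_protect(page, page_size, VM_PAGE_READ | VM_PAGE_WRITE);
}
#endif
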
#ifdef CONFIGURE_TEST_VM_MAP
/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they could be compiled
*/
int main(void)
{
	vm_init();

#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
	unsigned long page_size = getpagesize();

	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
#endif