root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.26
Committed: 2007-01-13T18:21:30Z by gbeauche
Branch: MAIN
Changes since 1.25: +0 -144 lines
Log Message:
Remove the 33-bit addressing hack as it's overly complex for not much gain.
Rather, use an address override prefix (0x67), though the Intel Core optimization
reference guide says to avoid LCP prefixes. In practice, the measured impact on
performance is marginal, e.g. in Speedometer tests.

File Contents

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate or fallbacks to malloc)
 *
 *  Basilisk II (C) 1997-2005 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h> /* close(), getpagesize() */
#endif
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above it when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
// Placement hint for the next anonymous mapping, advanced after each vm_acquire()
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
    int flags = 0;
    if (vm_flags & VM_MAP_SHARED)
        flags |= MAP_SHARED;
    if (vm_flags & VM_MAP_PRIVATE)
        flags |= MAP_PRIVATE;
    if (vm_flags & VM_MAP_FIXED)
        flags |= MAP_FIXED;
    if (vm_flags & VM_MAP_32BIT)
        flags |= MAP_32BIT;
    return flags;
}
#endif

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
    return (LPVOID)(((DWORD)addr) & -65536);
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
    return size + ((DWORD)addr - (DWORD)align_addr_segment(addr));
}
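
/* Worked example (illustrative, not from the original source): with the 64K
   allocation granularity, align_addr_segment((LPVOID)0x0003a123) yields
   0x00030000, and align_size_segment((LPVOID)0x0003a123, 0x1000) yields
   0x0000b123, i.e. the size grows by the distance from the rounded-down
   base to the originally requested address. */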
#endif

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
    int prot = PAGE_READWRITE;
    if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_EXECUTE_READWRITE;
    else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
        prot = PAGE_EXECUTE_READ;
    else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_READWRITE;
    else if (prot_flags == VM_PAGE_READ)
        prot = PAGE_READONLY;
    else if (prot_flags == 0)
        prot = PAGE_NOACCESS;
    return prot;
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    zero_fd = open("/dev/zero", O_RDWR);
    if (zero_fd < 0)
        return -1;
#endif
#endif
    return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    if (zero_fd != -1) {
        close(zero_fd);
        zero_fd = -1;
    }
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
    void * addr;

    // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
    if (options & VM_MAP_FIXED)
        return VM_MAP_FAILED;

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
        return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags;

    if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
        return VM_MAP_FAILED;

    // Sanity check for 64-bit platforms: a VM_MAP_32BIT request must yield an address below 4 GB
    if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
        return VM_MAP_FAILED;

    next_address = (char *)addr + size;
#else
#ifdef HAVE_WIN32_VM
    if ((addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE)) == NULL)
        return VM_MAP_FAILED;
#else
    if ((addr = calloc(size, 1)) == 0)
        return VM_MAP_FAILED;

    // Omit changes for protections because they are not supported in this mode
    return addr;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return VM_MAP_FAILED;

    return addr;
}

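#if 0
/* Illustrative usage sketch (not part of the original source): how a caller
   typically pairs vm_acquire() with vm_protect() and vm_release(). The
   function name and the 1 MB size are hypothetical; VM_MAP_PRIVATE and the
   VM_PAGE_* constants are the generic flags declared in vm_alloc.h. */
static int example_acquire_release(void)
{
    const size_t size = 1024 * 1024;

    // Acquire a private, zero-filled region with default (read/write) protection
    void *area = vm_acquire(size, VM_MAP_PRIVATE);
    if (area == VM_MAP_FAILED)
        return -1;

    // Optionally tighten the protection, e.g. make the region read-only
    if (vm_protect(area, size, VM_PAGE_READ) != 0)
        return -1;

    // Release the mapping again (vm_release() also tolerates VM_MAP_FAILED)
    return vm_release(area, size);
}
#endif
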
/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
    // Fixed mappings are required to be private
    if (options & VM_MAP_SHARED)
        return -1;

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

    if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    // Windows cannot allocate Low Memory
    if (addr == NULL)
        return -1;

    // Allocate a possibly offset region to align on 64K boundaries
    LPVOID req_addr = align_addr_segment(addr);
    DWORD req_size = align_size_segment(addr, size);
    LPVOID ret_addr = VirtualAlloc(req_addr, req_size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
    if (ret_addr != req_addr)
        return -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return -1;

    return 0;
}

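#if 0
/* Illustrative sketch (not part of the original source): reserving a region at
   a fixed, page-aligned address, as an emulator might do for its RAM area.
   The address, size and function name are hypothetical. */
static int example_fixed_mapping(void)
{
    char * const wanted = (char *)0x20000000;   // must be page-aligned
    const size_t size = 0x100000;

    if (vm_acquire_fixed(wanted, size, VM_MAP_PRIVATE) < 0)
        return -1;

    // The region is now mapped exactly at 'wanted'
    return vm_release(wanted, size);
}
#endif
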
/* Deallocate any mapping for the region starting at ADDR and extending
   SIZE bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
    // Safety check: don't try to release memory that was not allocated
    if (addr == VM_MAP_FAILED)
        return 0;

#ifdef HAVE_MACH_VM
    if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    if (munmap((caddr_t)addr, size) != 0)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
        return -1;
#else
    free(addr);
#endif
#endif
#endif

    return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending SIZE bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
    int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
    return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
    int ret_code = mprotect((caddr_t)addr, size, prot);
    return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
    DWORD old_prot;
    int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
    return ret_code != 0 ? 0 : -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif
}

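#if 0
/* Illustrative sketch (not part of the original source): temporarily
   write-protecting one page of a previously acquired region and restoring
   read/write access afterwards. 'page' is a hypothetical page-aligned pointer. */
static int example_protect(char *page, size_t page_size)
{
    // Make the page read-only; subsequent writes to it will fault
    if (vm_protect(page, page_size, VM_PAGE_READ) < 0)
        return -1;

    // Restore read/write access
    return vm_protect(page, page_size, VM_PAGE_READ | VM_PAGE_WRITE);
}
#endif
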
/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
    // Note: this uses the allocation granularity (usually 64K),
    // not the CPU page size (si.dwPageSize)
    static unsigned long page_size = 0;
    if (page_size == 0) {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        page_size = si.dwAllocationGranularity;
    }
    return page_size;
#else
    return getpagesize();
#endif
}

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
    exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
    vm_init();

    signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
    signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
    unsigned long page_size = vm_get_page_size();

    const int area_size = 6 * page_size;
    volatile char * area = (volatile char *) vm_acquire(area_size);
    volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_release((char *)area, area_size) < 0)
        return 1;

    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
    if (area == VM_MAP_FAILED)
        return 0;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
        return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
    // this should cause a core dump
    char foo = *fault_address;
    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
    // this should cause a core dump
    *fault_address = 'z';
    return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    // this should not cause a core dump
    *fault_address = 'z';
    return 0;
#endif
}
#endif