root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.31
Committed: 2008-01-20T00:38:52Z by gbeauche
Branch: MAIN
Changes since 1.30: +3 -0 lines
Log Message:
HP-UX does support 32-bit memory mappings.

File Contents

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate or fallbacks to malloc)
 *
 *  Basilisk II (C) 1997-2008 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_WIN32_VM
/* Windows is either ILP32 or LLP64 */
typedef UINT_PTR vm_uintptr_t;
#else
/* Other systems are sane as they are either ILP32 or LP64 */
typedef unsigned long vm_uintptr_t;
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#if defined(__hpux)
#define MAP_32BIT MAP_ADDR32
#endif
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above 0x80000000 when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
    int flags = 0;
    if (vm_flags & VM_MAP_SHARED)
        flags |= MAP_SHARED;
    if (vm_flags & VM_MAP_PRIVATE)
        flags |= MAP_PRIVATE;
    if (vm_flags & VM_MAP_FIXED)
        flags |= MAP_FIXED;
    if (vm_flags & VM_MAP_32BIT)
        flags |= MAP_32BIT;
    return flags;
}
#endif
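
/* For illustration (added; not part of the original file): with the
   generic VM_MAP_* flags declared in vm_alloc.h, a private mapping that
   must stay in the low 4 GB translates as
     translate_map_flags(VM_MAP_PRIVATE | VM_MAP_32BIT) == (MAP_PRIVATE | MAP_32BIT)
   where MAP_32BIT degrades to 0 (see the defines above) on hosts that
   lack such a flag, so the request is silently best-effort there. */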

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
    return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
    return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
}
#endif
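
/* Worked example (added; not part of the original file): for
   addr == 0x12345678 and size == 0x1000,
     align_addr_segment(addr)       == 0x12340000  (rounded down to 64K)
     align_size_segment(addr, size) == 0x1000 + 0x5678 == 0x6678
   so the aligned region [0x12340000, 0x12346678) still covers the
   requested [0x12345678, 0x12346678). */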

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
    int prot = PAGE_READWRITE;
    if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_EXECUTE_READWRITE;
    else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
        prot = PAGE_EXECUTE_READ;
    else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_READWRITE;
    else if (prot_flags == VM_PAGE_READ)
        prot = PAGE_READONLY;
    else if (prot_flags == 0)
        prot = PAGE_NOACCESS;
    return prot;
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    zero_fd = open("/dev/zero", O_RDWR);
    if (zero_fd < 0)
        return -1;
#endif
#endif
    return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    if (zero_fd != -1) {
        close(zero_fd);
        zero_fd = -1;
    }
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
    void * addr;

    // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
    if (options & VM_MAP_FIXED)
        return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
    if (options & VM_MAP_WRITE_WATCH)
        return VM_MAP_FAILED;
#endif

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
        return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags;

    if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
        return VM_MAP_FAILED;

    // Sanity checks for 64-bit platforms
    if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
        return VM_MAP_FAILED;

    next_address = (char *)addr + size;
#else
#ifdef HAVE_WIN32_VM
    int alloc_type = MEM_RESERVE | MEM_COMMIT;
    if (options & VM_MAP_WRITE_WATCH)
        alloc_type |= MEM_WRITE_WATCH;

    if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
        return VM_MAP_FAILED;
#else
    if ((addr = calloc(size, 1)) == 0)
        return VM_MAP_FAILED;

    // Omit changes for protections because they are not supported in this mode
    return addr;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return VM_MAP_FAILED;

    return addr;
}
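
#if 0
/* Usage sketch (added for illustration, not part of the original file):
   a hypothetical caller that grabs a RAM area for the emulated machine.
   Only the vm_* API defined in this file and the VM_MAP_* flags from
   vm_alloc.h are assumed; alloc_ram() itself is invented. */
static char *alloc_ram(size_t ram_size)
{
    if (vm_init() < 0)
        return NULL;
    // Request a private mapping, kept 32-bit reachable where supported
    void *ram = vm_acquire(ram_size, VM_MAP_DEFAULT | VM_MAP_32BIT);
    if (ram == VM_MAP_FAILED)
        return NULL;
    return (char *)ram;    // release later with vm_release(ram, ram_size)
}
#endif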

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
    // Fixed mappings are required to be private
    if (options & VM_MAP_SHARED)
        return -1;

#ifndef HAVE_VM_WRITE_WATCH
    if (options & VM_MAP_WRITE_WATCH)
        return -1;
#endif

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

    if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    // Windows cannot allocate Low Memory
    if (addr == NULL)
        return -1;

    int alloc_type = MEM_RESERVE | MEM_COMMIT;
    if (options & VM_MAP_WRITE_WATCH)
        alloc_type |= MEM_WRITE_WATCH;

    // Allocate a possibly offset region to align on 64K boundaries
    LPVOID req_addr = align_addr_segment(addr);
    DWORD req_size = align_size_segment(addr, size);
    LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
    if (ret_addr != req_addr)
        return -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return -1;

    return 0;
}
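
#if 0
/* Usage sketch (added for illustration, not part of the original file):
   mapping the emulated machine's Low Memory globals at a fixed address.
   The address 0x2000 and map_lowmem_example() are purely hypothetical,
   and VM_MAP_DEFAULT is assumed to request a private mapping as
   declared in vm_alloc.h. */
static int map_lowmem_example(void)
{
    const size_t lowmem_size = 0x2000;
    void *lowmem_base = (void *)0x2000;    // must be page-aligned
    if (vm_acquire_fixed(lowmem_base, lowmem_size, VM_MAP_DEFAULT) < 0)
        return -1;    // address unavailable, or fixed maps unsupported here
    return 0;
}
#endif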

/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
    // Safety check: don't try to release memory that was not allocated
    if (addr == VM_MAP_FAILED)
        return 0;

#ifdef HAVE_MACH_VM
    if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    if (munmap((caddr_t)addr, size) != 0)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
        return -1;
#else
    free(addr);
#endif
#endif
#endif

    return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
    int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
    return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
    int ret_code = mprotect((caddr_t)addr, size, prot);
    return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
    DWORD old_prot;
    int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
    return ret_code != 0 ? 0 : -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif
}
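
#if 0
/* Usage sketch (added for illustration, not part of the original file):
   flipping a JIT translation cache from writable to executable, the
   typical W^X dance. seal_code_cache() is invented; the VM_PAGE_* flags
   are the ones exercised by the tests below. */
static int seal_code_cache(void *cache, size_t cache_size)
{
    // The cache was filled while mapped read/write; now drop write
    // permission and allow execution in one protection change.
    return vm_protect(cache, cache_size, VM_PAGE_READ | VM_PAGE_EXECUTE);
}
#endif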

/* Return the addresses of the pages that got modified in the
   specified range [ ADDR, ADDR + SIZE ) since the last reset of the
   watch bits. Returns 0 if successful, -1 for errors. */

int vm_get_write_watch(void * addr, size_t size,
                       void ** pages, unsigned int * n_pages,
                       int options)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
    DWORD flags = 0;
    if (options & VM_WRITE_WATCH_RESET)
        flags |= WRITE_WATCH_FLAG_RESET;

    ULONG page_size;
    ULONG_PTR count = *n_pages;
    int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
    if (ret_code != 0)
        return -1;

    *n_pages = count;
    return 0;
#endif
#endif
    // Unsupported
    return -1;
}

/* Reset the write-tracking state for the specified range
   [ ADDR, ADDR + SIZE ). Returns 0 if successful, -1 for errors. */

int vm_reset_write_watch(void * addr, size_t size)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
    int ret_code = ResetWriteWatch(addr, size);
    return ret_code == 0 ? 0 : -1;
#endif
#endif
    // Unsupported
    return -1;
}

/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
    // On Windows this returns the allocation granularity (usually 64K),
    // the unit VirtualAlloc() reserves at, rather than the CPU page size
    static vm_uintptr_t page_size = 0;
    if (page_size == 0) {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        page_size = si.dwAllocationGranularity;
    }
    return page_size;
#else
    return getpagesize();
#endif
}
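
#if 0
/* Worked example (added for illustration, not part of the original file):
   rounding an arbitrary size up to whole pages, as callers of
   vm_acquire() typically do before mapping. round_to_pages() is
   invented; it assumes page_size is a power of two. */
static size_t round_to_pages(size_t size)
{
    const size_t page_size = vm_get_page_size();
    // e.g. with page_size == 4096, a request of 5000 rounds up to 8192
    return (size + page_size - 1) & ~(page_size - 1);
}
#endif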

#ifdef CONFIGURE_TEST_VM_WRITE_WATCH
int main(void)
{
    int i, j;

    vm_init();

    vm_uintptr_t page_size = vm_get_page_size();

    char *area;
    const int n_pages = 7;
    const int area_size = n_pages * page_size;
    const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
    if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
        return 1;

    unsigned int n_modified_pages_expected = 0;
    static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
    for (i = 0; i < n_pages; i++) {
        if (touch_page[i]) {
            area[i * page_size] = 1;
            ++n_modified_pages_expected;
        }
    }

    char *modified_pages[n_pages];
    unsigned int n_modified_pages = n_pages;
    if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages) < 0)
        return 2;
    if (n_modified_pages != n_modified_pages_expected)
        return 3;
    for (i = 0, j = 0; i < n_pages; i++) {
        char v = area[i * page_size];
        if ((touch_page[i] && !v) || (!touch_page[i] && v))
            return 4;
        if (!touch_page[i])
            continue;
        if (modified_pages[j] != (area + i * page_size))
            return 5;
        ++j;
    }

    vm_release(area, area_size);
    return 0;
}
#endif

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
    exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
    vm_init();

    signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
    signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
    vm_uintptr_t page_size = vm_get_page_size();

    const int area_size = 6 * page_size;
    volatile char * area = (volatile char *) vm_acquire(area_size);
    volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_release((char *)area, area_size) < 0)
        return 1;

    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
    if (area == VM_MAP_FAILED)
        return 0;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
        return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
    // this should cause a core dump
    char foo = *fault_address;
    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
    // this should cause a core dump
    *fault_address = 'z';
    return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    // this should not cause a core dump
    *fault_address = 'z';
    return 0;
#endif
}
#endif