root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.30
Committed: 2008-01-17T23:19:01Z by gbeauche
Branch: MAIN
Changes since 1.29: +1 -1 lines
Log Message:
Fix build on 64-bit Vista.

File Contents

/*
 * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                (supports mmap, vm_allocate or fallbacks to malloc)
 *
 * Basilisk II (C) 1997-2008 Christian Bauer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

/* Needed for close() and getpagesize() in the POSIX code paths
   (added here, guarded in the same autoconf style as the rest). */
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_WIN32_VM
/* Windows is either ILP32 or LLP64 */
typedef UINT_PTR vm_uintptr_t;
#else
/* Other systems are sane as they are either ILP32 or LP64 */
typedef unsigned long vm_uintptr_t;
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above that when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
    int flags = 0;
    if (vm_flags & VM_MAP_SHARED)
        flags |= MAP_SHARED;
    if (vm_flags & VM_MAP_PRIVATE)
        flags |= MAP_PRIVATE;
    if (vm_flags & VM_MAP_FIXED)
        flags |= MAP_FIXED;
    if (vm_flags & VM_MAP_32BIT)
        flags |= MAP_32BIT;
    return flags;
}
#endif

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
    return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
    return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
}
#endif
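
/* Worked example (illustrative comment, not part of the original file):
   with addr = 0x0012B000 and size = 0x1000, align_addr_segment() clears
   the low 16 bits and yields 0x00120000, while align_size_segment()
   grows the size by the 0xB000 bytes that were cut off, giving 0xC000.
   The resulting pair (0x00120000, 0xC000) still covers the requested
   range and starts on the 64K allocation granularity that
   VirtualAlloc() expects. */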

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
    int prot = PAGE_READWRITE;
    if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_EXECUTE_READWRITE;
    else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
        prot = PAGE_EXECUTE_READ;
    else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
        prot = PAGE_READWRITE;
    else if (prot_flags == VM_PAGE_READ)
        prot = PAGE_READONLY;
    else if (prot_flags == 0)
        prot = PAGE_NOACCESS;
    return prot;
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
/* zero_fd is a macro (-1) when anonymous mappings are available, so the
   open() below is only compiled in the /dev/zero fallback case. */
#ifndef zero_fd
    zero_fd = open("/dev/zero", O_RDWR);
    if (zero_fd < 0)
        return -1;
#endif
#endif
    return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    if (zero_fd != -1) {
        close(zero_fd);
        zero_fd = -1;
    }
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
    void * addr;

    // VM_MAP_FIXED is to be used with vm_acquire_fixed() only
    if (options & VM_MAP_FIXED)
        return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
    if (options & VM_MAP_WRITE_WATCH)
        return VM_MAP_FAILED;
#endif

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
        return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags;

    if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
        return VM_MAP_FAILED;

    // Sanity checks for 64-bit platforms: release the mapping if the
    // kernel ignored MAP_32BIT and placed it above the 4 GB mark
    if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff)) {
        munmap((caddr_t)addr, size);
        return VM_MAP_FAILED;
    }

    next_address = (char *)addr + size;
#else
#ifdef HAVE_WIN32_VM
    int alloc_type = MEM_RESERVE | MEM_COMMIT;
    if (options & VM_MAP_WRITE_WATCH)
        alloc_type |= MEM_WRITE_WATCH;

    if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
        return VM_MAP_FAILED;
#else
    if ((addr = calloc(size, 1)) == NULL)
        return VM_MAP_FAILED;

    // Omit protection changes; they are not supported in this mode
    return addr;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return VM_MAP_FAILED;

    return addr;
}
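
/* Usage sketch (illustrative, not part of the original file; compiled out
   with #if 0). It shows the typical acquire / protect / release life
   cycle using the generic VM_MAP_* / VM_PAGE_* flags from vm_alloc.h. */
#if 0
static int vm_acquire_example(void)
{
    const size_t size = 16 * vm_get_page_size();
    void *mem = vm_acquire(size, VM_MAP_DEFAULT);
    if (mem == VM_MAP_FAILED)
        return -1;

    // Temporarily make the buffer read-only, then read/write again
    if (vm_protect(mem, size, VM_PAGE_READ) < 0)
        return -1;
    if (vm_protect(mem, size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
        return -1;

    return vm_release(mem, size);
}
#endif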

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
    // Fixed mappings are required to be private
    if (options & VM_MAP_SHARED)
        return -1;

#ifndef HAVE_VM_WRITE_WATCH
    if (options & VM_MAP_WRITE_WATCH)
        return -1;
#endif

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    int fd = zero_fd;
    int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

    if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    // Windows cannot allocate Low Memory
    if (addr == NULL)
        return -1;

    int alloc_type = MEM_RESERVE | MEM_COMMIT;
    if (options & VM_MAP_WRITE_WATCH)
        alloc_type |= MEM_WRITE_WATCH;

    // Allocate a possibly offset region to align on 64K boundaries
    LPVOID req_addr = align_addr_segment(addr);
    DWORD req_size = align_size_segment(addr, size);
    LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
    if (ret_addr != req_addr)
        return -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say Mac OS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return -1;

    return 0;
}
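
/* Usage sketch (illustrative, not part of the original file; compiled out
   with #if 0). The emulators use vm_acquire_fixed() to pin guest memory
   at a host address chosen by the caller; the base below is hypothetical
   and merely assumed to be page-aligned and available. */
#if 0
static void *vm_acquire_fixed_example(void)
{
    const vm_uintptr_t base = 0x20000000;   // hypothetical base address
    const size_t size = 0x10000;
    if (vm_acquire_fixed((void *)base, size, VM_MAP_DEFAULT) < 0)
        return VM_MAP_FAILED;
    return (void *)base;
}
#endif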

/* Deallocate any mapping for the region starting at ADDR and extending
   SIZE bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
    // Safety check: don't try to release memory that was not allocated
    if (addr == VM_MAP_FAILED)
        return 0;

#ifdef HAVE_MACH_VM
    if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    if (munmap((caddr_t)addr, size) != 0)
        return -1;
#else
#ifdef HAVE_WIN32_VM
    if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
        return -1;
#else
    free(addr);
#endif
#endif
#endif

    return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending SIZE bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
    // This resolves to the five-argument Mach vm_protect(), not a
    // recursive call to this wrapper
    int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
    return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
    int ret_code = mprotect((caddr_t)addr, size, prot);
    return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
    DWORD old_prot;
    int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
    return ret_code != 0 ? 0 : -1;
#else
    // Unsupported
    return -1;
#endif
#endif
#endif
}
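
/* Usage sketch (illustrative, not part of the original file; compiled out
   with #if 0). A JIT-style caller flips a buffer to read/execute after
   emitting code into it. */
#if 0
static int vm_make_executable_example(void *code, size_t size)
{
    // VM_PAGE_* are the generic protection bits from vm_alloc.h
    return vm_protect(code, size, VM_PAGE_READ | VM_PAGE_EXECUTE);
}
#endif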

/* Return the addresses of the pages that got modified in the
   specified range [ADDR, ADDR + SIZE) since the last reset of the
   watch bits. Returns 0 if successful, -1 for errors. */

int vm_get_write_watch(void * addr, size_t size,
                       void ** pages, unsigned int * n_pages,
                       int options)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
    DWORD flags = 0;
    if (options & VM_WRITE_WATCH_RESET)
        flags |= WRITE_WATCH_FLAG_RESET;

    ULONG page_size;
    ULONG_PTR count = *n_pages;
    int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
    if (ret_code != 0)
        return -1;

    *n_pages = count;
    return 0;
#endif
#endif
    // Unsupported
    return -1;
}

/* Reset the write-tracking state for the specified range
   [ADDR, ADDR + SIZE). Returns 0 if successful, -1 for errors. */

int vm_reset_write_watch(void * addr, size_t size)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
    int ret_code = ResetWriteWatch(addr, size);
    return ret_code == 0 ? 0 : -1;
#endif
#endif
    // Unsupported
    return -1;
}

/* Returns the size of a page. Note that on Windows this is the
   allocation granularity (usually 64K), the unit VirtualAlloc()
   actually works with, rather than the 4K hardware page size. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
    static vm_uintptr_t page_size = 0;
    if (page_size == 0) {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        page_size = si.dwAllocationGranularity;
    }
    return page_size;
#else
    return getpagesize();
#endif
}

#ifdef CONFIGURE_TEST_VM_WRITE_WATCH
int main(void)
{
    int i, j;

    vm_init();

    vm_uintptr_t page_size = vm_get_page_size();

    char *area;
    const int n_pages = 7;
    const int area_size = n_pages * page_size;
    const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
    if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
        return 1;

    unsigned int n_modified_pages_expected = 0;
    static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
    for (i = 0; i < n_pages; i++) {
        if (touch_page[i]) {
            area[i * page_size] = 1;
            ++n_modified_pages_expected;
        }
    }

    char *modified_pages[n_pages];
    unsigned int n_modified_pages = n_pages;
    // Pass options explicitly so the call matches the definition above
    if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages, 0) < 0)
        return 2;
    if (n_modified_pages != n_modified_pages_expected)
        return 3;
    for (i = 0, j = 0; i < n_pages; i++) {
        char v = area[i * page_size];
        if ((touch_page[i] && !v) || (!touch_page[i] && v))
            return 4;
        if (!touch_page[i])
            continue;
        if (modified_pages[j] != (area + i * page_size))
            return 5;
        ++j;
    }

    vm_release(area, area_size);
    return 0;
}
#endif

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
    exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices succeed precisely when the expected
     crash occurs
   - TEST_VM_MAP_ANON* program slices succeed if they could be compiled
*/
int main(void)
{
    vm_init();

    signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
    signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
    vm_uintptr_t page_size = vm_get_page_size();

    const int area_size = 6 * page_size;
    volatile char * area = (volatile char *) vm_acquire(area_size, VM_MAP_DEFAULT);
    volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_release((char *)area, area_size) < 0)
        return 1;

    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
    if (area == VM_MAP_FAILED)
        return 0;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
        return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
    // this should cause a core dump
    char foo = *fault_address;
    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
    // this should cause a core dump
    *fault_address = 'z';
    return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    // this should not cause a core dump
    *fault_address = 'z';
    return 0;
#endif
}
#endif
537 #endif