root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.32
Committed: 2009-08-11T07:43:46Z by asvitkine
Branch: MAIN
Changes since 1.31: +31 -2 lines
Log Message:
[Michael Schmitt]
SheepShaver includes the C errno string in many error messages. One case is when it calls the memory allocation routines in the Basilisk II vm_alloc.cpp module.

This works when the memory allocation routine uses functions that set errno (such as mmap or malloc). For example, running SheepShaver on a Linux host produces meaningful error messages.

The problem is that when run on an OS X host, the memory allocation uses Mach routines such as vm_allocate, which do not set errno.

So when SheepShaver reported the error, it used a stale value of errno, which happened to be 17. The result was an extremely misleading error message: "Cannot map RAM: File already exists".

The fix is to change vm_alloc so that it translates Mach return codes into POSIX errno values.

It also initializes errno to 0 at the start of the memory allocation routine, so that no matter what path it takes, it won't return a stale value.
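To illustrate the effect, here is a minimal caller-side sketch. The allocate_ram() helper and the message text are hypothetical stand-ins for SheepShaver's actual reporting code; vm_acquire(), VM_MAP_DEFAULT and VM_MAP_FAILED come from the vm_alloc interface shown below.

#include <cstdio>
#include <cstring>
#include <cerrno>
#include "vm_alloc.h"

// Hypothetical reporting path, mirroring the style of SheepShaver's messages.
static void *allocate_ram(size_t size)
{
	void *ram = vm_acquire(size, VM_MAP_DEFAULT);
	if (ram == VM_MAP_FAILED) {
		// After this change, errno holds a freshly translated POSIX code
		// even on the Mach path, so strerror() no longer yields a stale
		// value such as EEXIST (17, "File exists").
		fprintf(stderr, "Cannot map RAM: %s\n", strerror(errno));
		return NULL;
	}
	return ram;
}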

File Contents

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate, or falls back to malloc)
 *
 *  Basilisk II (C) 1997-2008 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_WIN32_VM
/* Windows is either ILP32 or LLP64 */
typedef UINT_PTR vm_uintptr_t;
#else
/* Other systems are sane as they are either ILP32 or LP64 */
typedef unsigned long vm_uintptr_t;
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches could be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#if defined(__hpux)
#define MAP_32BIT MAP_ADDR32
#endif
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above 0x80000000 when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
	int flags = 0;
	if (vm_flags & VM_MAP_SHARED)
		flags |= MAP_SHARED;
	if (vm_flags & VM_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (vm_flags & VM_MAP_FIXED)
		flags |= MAP_FIXED;
	if (vm_flags & VM_MAP_32BIT)
		flags |= MAP_32BIT;
	return flags;
}
#endif

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
	return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
	return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
}
#endif

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
	int prot = PAGE_READWRITE;
	if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_EXECUTE_READWRITE;
	else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
		prot = PAGE_EXECUTE_READ;
	else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_READWRITE;
	else if (prot_flags == VM_PAGE_READ)
		prot = PAGE_READONLY;
	else if (prot_flags == 0)
		prot = PAGE_NOACCESS;
	return prot;
}
#endif

/* Translate Mach return codes to POSIX errno values. */
#ifdef HAVE_MACH_VM
static int vm_error(kern_return_t ret_code)
{
	switch (ret_code) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
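/* zero_fd is #defined to -1 when anonymous mmap is available, so this
   #ifndef block only opens /dev/zero in the non-anonymous fallback case. */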
#ifndef zero_fd
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
	void * addr;

	errno = 0;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return VM_MAP_FAILED;
#endif

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
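	// The TRUE flag means "anywhere": the kernel chooses the address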
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return VM_MAP_FAILED;
	}
#else
#ifdef HAVE_MMAP_VM
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;

	if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;
#else
#ifdef HAVE_WIN32_VM
	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Protection changes are not supported in this mode, so return directly
	return addr;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
	errno = 0;

	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return -1;
#endif

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
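	// The 0 flag requests a mapping at exactly the address passed in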
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return -1;
	}
#else
#ifdef HAVE_MMAP_VM
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return -1;

	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	// Allocate a possibly offset region to align on 64K boundaries
	LPVOID req_addr = align_addr_segment(addr);
	DWORD req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}

/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
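	// Resolves to the five-argument Mach vm_protect(), not this wrapper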
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}

/* Return the addresses of the pages that got modified in the
   specified range [ADDR, ADDR + SIZE) since the last reset of the
   watch bits. Returns 0 if successful, -1 for errors. */

int vm_get_write_watch(void * addr, size_t size,
                       void ** pages, unsigned int * n_pages,
                       int options)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	DWORD flags = 0;
	if (options & VM_WRITE_WATCH_RESET)
		flags |= WRITE_WATCH_FLAG_RESET;

	ULONG page_size;
	ULONG_PTR count = *n_pages;
	int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
	if (ret_code != 0)
		return -1;

	*n_pages = count;
	return 0;
#endif
#endif
	// Unsupported
	return -1;
}

/* Reset the write-tracking state for the specified range
   [ADDR, ADDR + SIZE). Returns 0 if successful, -1 for errors. */

int vm_reset_write_watch(void * addr, size_t size)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	int ret_code = ResetWriteWatch(addr, size);
	return ret_code == 0 ? 0 : -1;
#endif
#endif
	// Unsupported
	return -1;
}

/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
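	// Deliberately the allocation granularity (usually 64K) rather than
	// dwPageSize, since VirtualAlloc() reserves regions at this granularity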
	static vm_uintptr_t page_size = 0;
	if (page_size == 0) {
		SYSTEM_INFO si;
		GetSystemInfo(&si);
		page_size = si.dwAllocationGranularity;
	}
	return page_size;
#else
	return getpagesize();
#endif
}

#ifdef CONFIGURE_TEST_VM_WRITE_WATCH
int main(void)
{
	int i, j;

	vm_init();

	vm_uintptr_t page_size = vm_get_page_size();

	char *area;
	const int n_pages = 7;
	const int area_size = n_pages * page_size;
	const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
	if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
		return 1;
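
	// Dirty a fixed pattern of pages; the write-watch query below should
	// report exactly these pages (the checks assume ascending address order)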
	unsigned int n_modified_pages_expected = 0;
	static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
	for (i = 0; i < n_pages; i++) {
		if (touch_page[i]) {
			area[i * page_size] = 1;
			++n_modified_pages_expected;
		}
	}

	char *modified_pages[n_pages];
	unsigned int n_modified_pages = n_pages;
	if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages) < 0)
		return 2;
	if (n_modified_pages != n_modified_pages_expected)
		return 3;
	for (i = 0, j = 0; i < n_pages; i++) {
		char v = area[i * page_size];
		if ((touch_page[i] && !v) || (!touch_page[i] && v))
			return 4;
		if (!touch_page[i])
			continue;
		if (modified_pages[j] != (area + i * page_size))
			return 5;
		++j;
	}

	vm_release(area, area_size);
	return 0;
}
#endif

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
	exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
	vm_init();

	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
	vm_uintptr_t page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
#endif