/*
 * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 * (supports mmap, vm_allocate or falls back to malloc)
 *
 * Basilisk II (C) 1997-2008 Christian Bauer
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#if defined(__APPLE__) && defined(__MACH__)
#include <sys/utsname.h>
#endif

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_WIN32_VM
/* Windows is either ILP32 or LLP64 */
typedef UINT_PTR vm_uintptr_t;
#else
/* Other systems are sane as they are either ILP32 or LP64 */
typedef unsigned long vm_uintptr_t;
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches could be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#if defined(__hpux)
#define MAP_32BIT MAP_ADDR32
#endif
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if (defined(__linux__) && defined(__i386__)) || HAVE_LINKER_SCRIPT
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
	int flags = 0;
	if (vm_flags & VM_MAP_SHARED)
		flags |= MAP_SHARED;
	if (vm_flags & VM_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (vm_flags & VM_MAP_FIXED)
		flags |= MAP_FIXED;
	if (vm_flags & VM_MAP_32BIT)
		flags |= MAP_32BIT;
	return flags;
}
#endif

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
	return (LPVOID)(((vm_uintptr_t)addr) & -((vm_uintptr_t)65536));
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
	return size + ((vm_uintptr_t)addr - (vm_uintptr_t)align_addr_segment(addr));
}
#endif

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
	int prot = PAGE_READWRITE;
	if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_EXECUTE_READWRITE;
	else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
		prot = PAGE_EXECUTE_READ;
	else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_READWRITE;
	else if (prot_flags == VM_PAGE_READ)
		prot = PAGE_READONLY;
	else if (prot_flags == 0)
		prot = PAGE_NOACCESS;
	return prot;
}
#endif

/* Translate Mach return codes to POSIX errno values. */
#ifdef HAVE_MACH_VM
static int vm_error(kern_return_t ret_code)
{
	switch (ret_code) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:
		return EACCES;
	default:
		return EINVAL;
	}
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif

	// On 10.4 and earlier, reset CrashReporter's task signal handler to
	// avoid having it show up for signals that get handled.
#if defined(__APPLE__) && defined(__MACH__)
	struct utsname info;

	if (!uname(&info) && atoi(info.release) <= 8) {
		task_set_exception_ports(mach_task_self(),
			EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
			MACH_PORT_NULL,
			EXCEPTION_STATE_IDENTITY,
			MACHINE_THREAD_STATE);
	}
#endif

	return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	if (zero_fd != -1) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
	void * addr;

	errno = 0;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return VM_MAP_FAILED;
#endif

#if defined(HAVE_MACH_VM)
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return VM_MAP_FAILED;
	}
#elif defined(HAVE_MMAP_VM)
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;

	if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && !((char *)addr <= (char *)0xffffffff))
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;
#elif defined(HAVE_WIN32_VM)
	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	if ((addr = VirtualAlloc(NULL, size, alloc_type, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say Mac OS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}
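
/* Usage sketch (illustrative, not part of the wrapper): a typical caller
   acquires an area with the default options and hands it back on shutdown.
   The 8 MB size below is an arbitrary value chosen for the example.

       char *ram = (char *)vm_acquire(8 * 1024 * 1024);
       if (ram == VM_MAP_FAILED) {
           // allocation failed; errno holds a reason where the backend sets one
       }
       // ... use the zero-filled, read/write area ...
       vm_release(ram, 8 * 1024 * 1024);
*/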

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
	errno = 0;

	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifndef HAVE_VM_WRITE_WATCH
	if (options & VM_MAP_WRITE_WATCH)
		return -1;
#endif

#if defined(HAVE_MACH_VM)
	// vm_allocate() returns a zero-filled memory region
	kern_return_t ret_code = vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0);
	if (ret_code != KERN_SUCCESS) {
		errno = vm_error(ret_code);
		return -1;
	}
#elif defined(HAVE_MMAP_VM)
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags | MAP_FIXED;

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0) == (void *)MAP_FAILED)
		return -1;
#elif defined(HAVE_WIN32_VM)
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return -1;

	int alloc_type = MEM_RESERVE | MEM_COMMIT;
	if (options & VM_MAP_WRITE_WATCH)
		alloc_type |= MEM_WRITE_WATCH;

	// Allocate a possibly offset region to align on 64K boundaries
	LPVOID req_addr = align_addr_segment(addr);
	DWORD req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, alloc_type, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say Mac OS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}
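
/* Usage sketch (illustrative): vm_acquire_fixed() is meant for mapping a
   region at a predetermined, page-aligned address, e.g. when an emulated
   address space must live at a known location. The base address and size
   below are made-up values for the example.

       void *base = (void *)0x10000000;	// hypothetical, page-aligned
       if (vm_acquire_fixed(base, 0x00200000, VM_MAP_DEFAULT) < 0) {
           // the host could not map that exact range
       }
*/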

/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}
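
/* Usage sketch (illustrative): the VM_PAGE_* flags combine with bitwise OR,
   so a caller generating code at run time could make a page executable, or
   revoke write access from a data page, along these lines (code_page and
   data_page stand for page-aligned addresses inside a vm_acquire()d region):

       vm_protect(code_page, page_size, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE);
       vm_protect(data_page, page_size, VM_PAGE_READ);	// read-only from now on
*/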

/* Return the addresses of the pages that got modified in the
   specified range [ ADDR, ADDR + SIZE [ since the last reset of the watch
   bits. Returns 0 if successful, -1 for errors. */

int vm_get_write_watch(void * addr, size_t size,
					   void ** pages, unsigned int * n_pages,
					   int options)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	DWORD flags = 0;
	if (options & VM_WRITE_WATCH_RESET)
		flags |= WRITE_WATCH_FLAG_RESET;

	ULONG page_size;
	ULONG_PTR count = *n_pages;
	int ret_code = GetWriteWatch(flags, addr, size, pages, &count, &page_size);
	if (ret_code != 0)
		return -1;

	*n_pages = count;
	return 0;
#endif
#endif
	// Unsupported
	return -1;
}

/* Reset the write-tracking state for the specified range [ ADDR, ADDR
   + SIZE [. Returns 0 if successful, -1 for errors. */

int vm_reset_write_watch(void * addr, size_t size)
{
#ifdef HAVE_VM_WRITE_WATCH
#ifdef HAVE_WIN32_VM
	int ret_code = ResetWriteWatch(addr, size);
	return ret_code == 0 ? 0 : -1;
#endif
#endif
	// Unsupported
	return -1;
}
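
/* Usage sketch (illustrative): write watching is only available when
   HAVE_VM_WRITE_WATCH is defined, and in this file only the Win32 backend
   implements it. A caller polling for dirtied pages passes an array sized
   for the worst case and may reset the watch state in the same call
   (MAX_PAGES, area and area_size are caller-defined here; see also the
   test program below):

       void *dirty[MAX_PAGES];
       unsigned int n_dirty = MAX_PAGES;
       if (vm_get_write_watch(area, area_size, dirty, &n_dirty, VM_WRITE_WATCH_RESET) == 0) {
           // dirty[0..n_dirty-1] are the start addresses of pages written since the last reset
       }
*/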

/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef HAVE_WIN32_VM
	static vm_uintptr_t page_size = 0;
	if (page_size == 0) {
		SYSTEM_INFO si;
		GetSystemInfo(&si);
		page_size = si.dwAllocationGranularity;
	}
	return page_size;
#else
	return getpagesize();
#endif
}

#ifdef CONFIGURE_TEST_VM_WRITE_WATCH
int main(void)
{
	int i, j;

	vm_init();

	vm_uintptr_t page_size = vm_get_page_size();

	char *area;
	const int n_pages = 7;
	const int area_size = n_pages * page_size;
	const int map_options = VM_MAP_DEFAULT | VM_MAP_WRITE_WATCH;
	if ((area = (char *)vm_acquire(area_size, map_options)) == VM_MAP_FAILED)
		return 1;

	unsigned int n_modified_pages_expected = 0;
	static const int touch_page[n_pages] = { 0, 1, 1, 0, 1, 0, 1 };
	for (i = 0; i < n_pages; i++) {
		if (touch_page[i]) {
			area[i * page_size] = 1;
			++n_modified_pages_expected;
		}
	}

	char *modified_pages[n_pages];
	unsigned int n_modified_pages = n_pages;
	if (vm_get_write_watch(area, area_size, (void **)modified_pages, &n_modified_pages) < 0)
		return 2;
	if (n_modified_pages != n_modified_pages_expected)
		return 3;
	for (i = 0, j = 0; i < n_pages; i++) {
		char v = area[i * page_size];
		if ((touch_page[i] && !v) || (!touch_page[i] && v))
			return 4;
		if (!touch_page[i])
			continue;
		if (modified_pages[j] != (area + i * page_size))
			return 5;
		++j;
	}

	vm_release(area, area_size);
	return 0;
}
#endif

#ifdef CONFIGURE_TEST_VM_MAP
#include <stdlib.h>
#include <signal.h>

static void fault_handler(int sig)
{
	exit(1);
}

/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
	vm_init();

	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS, fault_handler);
#endif

#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
	vm_uintptr_t page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
#endif