root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.17
Committed: 2005-01-30T21:42:14Z by gbeauche
Branch: MAIN
Changes since 1.16: +1 -1 lines
Log Message:
Happy New Year!

File Contents

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate, or falls back to malloc)
 *
 *  Basilisk II (C) 1997-2005 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>	/* getpid(), close(), ftruncate(), getpagesize() */
#endif

#ifdef HAVE_WIN32_VM
#define WIN32_LEAN_AND_MEAN /* avoid including junk */
#include <windows.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "vm_alloc.h"

#ifdef HAVE_MACH_VM
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

/* We want MAP_32BIT, if available, for SheepShaver and BasiliskII
   because the emulated target is 32-bit and this helps to allocate
   memory so that branches can be resolved more easily (32-bit
   displacement to code in .text), on AMD64 for example. */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ANON
#define MAP_ANON 0
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS 0
#endif

#define MAP_EXTRA_FLAGS (MAP_32BIT)

#ifdef HAVE_MMAP_VM
#if defined(__linux__) && defined(__i386__)
/* Force a reasonable address below 0x80000000 on x86 so that we
   don't get addresses above that when the program is run on AMD64.
   NOTE: this is empirically determined on Linux/x86. */
#define MAP_BASE 0x10000000
#else
#define MAP_BASE 0x00000000
#endif
static char * next_address = (char *)MAP_BASE;
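/* Note: next_address is only a placement hint; each successful vm_acquire()
   below bumps it past the region it just mapped, so successive allocations
   tend to stay low and contiguous rather than scattered over the address
   space. */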
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_ANON | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_ANONYMOUS | MAP_EXTRA_FLAGS)
#define zero_fd -1
#else
#define map_flags (MAP_EXTRA_FLAGS)
static int zero_fd = -1;
#endif
#endif
#endif
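/* Note: when anonymous mappings are available, zero_fd is the constant -1 and
   vm_init()/vm_exit() below have nothing to open or close; otherwise mappings
   are backed by /dev/zero, which vm_init() opens into zero_fd. */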

/* Utility functions for POSIX SHM handling. */
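/* Background for the helpers below: with USE_33BIT_ADDRESSING, vm_acquire()
   backs a request with a named POSIX shared memory object and maps that
   object a second time 4 GiB above the first mapping, so the same pages are
   reachable through either address. The shm_range_t list records the
   (file, base, size) triples so that vm_release() can later unmap the mirror
   and shm_unlink() the backing object. */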

#ifdef USE_33BIT_ADDRESSING
struct shm_range_t {
	const char *file;
	void *base;
	unsigned int size;
	shm_range_t *next;
};

static shm_range_t *shm_ranges = NULL;

static bool add_shm_range(const char *file, void *base, unsigned int size)
{
	shm_range_t *r = (shm_range_t *)malloc(sizeof(shm_range_t));
	if (r) {
		r->file = file;
		r->base = base;
		r->size = size;
		r->next = shm_ranges;
		shm_ranges = r;
		return true;
	}
	return false;
}

static shm_range_t *find_shm_range(void *base, unsigned int size)
{
	for (shm_range_t *r = shm_ranges; r != NULL; r = r->next)
		if (r->base == base && r->size == size)
			return r;
	return NULL;
}

static bool remove_shm_range(shm_range_t *r)
{
	if (r) {
		// Unlink the range from the list, handling the list head specially
		if (r == shm_ranges) {
			shm_ranges = r->next;
			free(r);
			return true;
		}
		for (shm_range_t *p = shm_ranges; p != NULL; p = p->next) {
			if (p->next == r) {
				p->next = r->next;
				free(r);
				return true;
			}
		}
	}
	return false;
}

static bool remove_shm_range(void *base, unsigned int size)
{
	return remove_shm_range(find_shm_range(base, size));
}
#endif

/* Build a POSIX SHM memory segment file descriptor name. */

#ifdef USE_33BIT_ADDRESSING
static const char *build_shm_filename(void)
{
	static int id = 0;
	static char filename[PATH_MAX];

	int ret = snprintf(filename, sizeof(filename), "/BasiliskII-%d-shm-%d", getpid(), id);
	if (ret == -1 || ret >= sizeof(filename))
		return NULL;

	id++;
	return filename;
}
#endif

/* Translate generic VM map flags to host values. */

#ifdef HAVE_MMAP_VM
static int translate_map_flags(int vm_flags)
{
	int flags = 0;
	if (vm_flags & VM_MAP_SHARED)
		flags |= MAP_SHARED;
	if (vm_flags & VM_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (vm_flags & VM_MAP_FIXED)
		flags |= MAP_FIXED;
	if (vm_flags & VM_MAP_32BIT)
		flags |= MAP_32BIT;
	return flags;
}
#endif

/* Align ADDR and SIZE to 64K boundaries. */

#ifdef HAVE_WIN32_VM
static inline LPVOID align_addr_segment(LPVOID addr)
{
	return (LPVOID)(((DWORD)addr) & -65536);
}

static inline DWORD align_size_segment(LPVOID addr, DWORD size)
{
	return size + ((DWORD)addr - (DWORD)align_addr_segment(addr));
}
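
/* Worked example (illustrative values): for addr = 0x00123456 and
   size = 0x1000, align_addr_segment() yields 0x00120000 and
   align_size_segment() grows the request to 0x1000 + 0x3456 = 0x4456 bytes,
   so the rounded-down base still covers the whole original range. */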
#endif

/* Translate generic VM prot flags to host values. */

#ifdef HAVE_WIN32_VM
static int translate_prot_flags(int prot_flags)
{
	int prot = PAGE_READWRITE;
	if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_EXECUTE_READWRITE;
	else if (prot_flags == (VM_PAGE_EXECUTE | VM_PAGE_READ))
		prot = PAGE_EXECUTE_READ;
	else if (prot_flags == (VM_PAGE_READ | VM_PAGE_WRITE))
		prot = PAGE_READWRITE;
	else if (prot_flags == VM_PAGE_READ)
		prot = PAGE_READONLY;
	else if (prot_flags == 0)
		prot = PAGE_NOACCESS;
	return prot;
}
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	zero_fd = open("/dev/zero", O_RDWR);
	if (zero_fd < 0)
		return -1;
#endif
#endif
	return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	close(zero_fd);
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size, int options)
{
	void * addr;

	// VM_MAP_FIXED is to be used with vm_acquire_fixed() only
	if (options & VM_MAP_FIXED)
		return VM_MAP_FAILED;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
		return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
	int fd = zero_fd;
	int the_map_flags = translate_map_flags(options) | map_flags;

#ifdef USE_33BIT_ADDRESSING
	const char *shm_file = NULL;
	if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
		the_map_flags &= ~(MAP_PRIVATE | MAP_ANON | MAP_ANONYMOUS);
		the_map_flags |= MAP_SHARED;

		if ((shm_file = build_shm_filename()) == NULL)
			return VM_MAP_FAILED;

		if ((fd = shm_open(shm_file, O_RDWR | O_CREAT | O_EXCL, 0644)) < 0)
			return VM_MAP_FAILED;

		if (ftruncate(fd, size) < 0)
			return VM_MAP_FAILED;
	}
#endif

	if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, the_map_flags, fd, 0)) == (void *)MAP_FAILED)
		return VM_MAP_FAILED;

	// Sanity checks for 64-bit platforms
	if (sizeof(void *) == 8 && (options & VM_MAP_32BIT) && (char *)addr > (char *)0xffffffff)
		return VM_MAP_FAILED;

	next_address = (char *)addr + size;

	// Since I don't know the standard behavior of mmap(), zero-fill here
	if (memset(addr, 0, size) != addr)
		return VM_MAP_FAILED;

	// Remap to 33-bit space
#ifdef USE_33BIT_ADDRESSING
	if (sizeof(void *) == 8 && (options & VM_MAP_33BIT)) {
		if (!add_shm_range(strdup(shm_file), addr, size))
			return VM_MAP_FAILED;

		if (mmap((char *)addr + (1L << 32), size, VM_PAGE_DEFAULT, the_map_flags | MAP_FIXED, fd, 0) == (void *)MAP_FAILED)
			return VM_MAP_FAILED;
		close(fd);
	}
#endif
#else
#ifdef HAVE_WIN32_VM
	if ((addr = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE)) == NULL)
		return VM_MAP_FAILED;

	// Zero newly allocated memory
	if (memset(addr, 0, size) != addr)
		return VM_MAP_FAILED;
#else
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Return early: protection changes are not supported in the malloc() fallback
	return addr;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say Mac OS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}

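/* Illustrative usage sketch (hypothetical caller, not part of this file):
   acquire a buffer, make it read/write/executable, use it, then release it.
   The one-argument vm_acquire() call mirrors the test program below.

     void *buf = vm_acquire(0x400000);
     if (buf == VM_MAP_FAILED)
         abort();
     if (vm_protect(buf, 0x400000, VM_PAGE_READ | VM_PAGE_WRITE | VM_PAGE_EXECUTE) != 0)
         abort();
     ...
     vm_release(buf, 0x400000);
*/
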
/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size, int options)
{
	// Fixed mappings are required to be private
	if (options & VM_MAP_SHARED)
		return -1;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	const int extra_map_flags = translate_map_flags(options);

	if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, extra_map_flags | map_flags | MAP_FIXED, zero_fd, 0) == (void *)MAP_FAILED)
		return -1;

	// Since I don't know the standard behavior of mmap(), zero-fill here
	if (memset(addr, 0, size) != addr)
		return -1;
#else
#ifdef HAVE_WIN32_VM
	// Windows cannot allocate Low Memory
	if (addr == NULL)
		return -1;

	// Allocate a possibly offset region to align on 64K boundaries
	LPVOID req_addr = align_addr_segment(addr);
	DWORD req_size = align_size_segment(addr, size);
	LPVOID ret_addr = VirtualAlloc(req_addr, req_size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
	if (ret_addr != req_addr)
		return -1;

	// Zero newly allocated memory
	if (memset(addr, 0, size) != addr)
		return -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say Mac OS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return -1;

	return 0;
}

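/* Illustrative only (hypothetical caller, not part of this file): an emulator
   that wants its RAM at a known guest address could try the fixed mapping
   first and fall back to a floating allocation.

     void *ram = (void *)0x40000000;
     if (vm_acquire_fixed(ram, 0x400000, VM_MAP_PRIVATE | VM_MAP_FIXED) < 0)
         ram = vm_acquire(0x400000);
*/
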
/* Deallocate any mapping for the region starting at ADDR and extending
   LEN bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
	// Safety check: don't try to release memory that was not allocated
	if (addr == VM_MAP_FAILED)
		return 0;

#ifdef HAVE_MACH_VM
	if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
		return -1;
#else
#ifdef HAVE_MMAP_VM
	if (munmap((caddr_t)addr, size) != 0)
		return -1;

#ifdef USE_33BIT_ADDRESSING
	shm_range_t *r = find_shm_range(addr, size);
	if (r) {
		if (munmap((char *)r->base + (1L << 32), size) != 0)
			return -1;

		if (shm_unlink(r->file) < 0)
			return -1;
		free((char *)r->file);

		if (!remove_shm_range(r))
			return -1;
	}
#endif
#else
#ifdef HAVE_WIN32_VM
	if (VirtualFree(align_addr_segment(addr), 0, MEM_RELEASE) == 0)
		return -1;
#else
	free(addr);
#endif
#endif
#endif

	return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending LEN bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
	return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	int ret_code = mprotect((caddr_t)addr, size, prot);
	return ret_code == 0 ? 0 : -1;
#else
#ifdef HAVE_WIN32_VM
	DWORD old_prot;
	int ret_code = VirtualProtect(addr, size, translate_prot_flags(prot), &old_prot);
	return ret_code != 0 ? 0 : -1;
#else
	// Unsupported
	return -1;
#endif
#endif
#endif
}

/* Returns the size of a page. */

int vm_get_page_size(void)
{
#ifdef _WIN32
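	// NOTE: hard-coded; x86 Windows uses 4 KiB pages. The more general
	// route would be GetSystemInfo() and SYSTEM_INFO::dwPageSize.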
	return 4096;
#else
	return getpagesize();
#endif
}

#ifdef CONFIGURE_TEST_VM_MAP
/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MMAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
	vm_init();

#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
	unsigned long page_size = vm_get_page_size();

	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
#endif