root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.7
Committed: 2002-03-16T10:51:17Z by gbeauche
Branch: MAIN
CVS Tags: nigel-build-12, nigel-build-13
Changes since 1.6: +4 -4 lines
Log Message:
- Check for caddr_t. On some systems, such as Solaris/SPARC, the address
  type of mmap() (its first parameter) is caddr_t instead of void *.
  Explicitly cast the address to caddr_t and let C++'s implicit pointer
  conversion rules do the rest, i.e. caddr_t -> void * is OK, unlike the
  opposite direction.
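
The cast direction described in this log message can be illustrated with a short, hypothetical C++ snippet (not part of the file; the function names to_void/to_caddr are made up for illustration): a caddr_t converts to void * implicitly, while the reverse direction needs an explicit cast in C++.

    #include <sys/types.h>   // caddr_t (typically a typedef for char *), where available

    void *  to_void(caddr_t a)  { return a; }            // implicit conversion: OK
    caddr_t to_caddr(void * p)  { return (caddr_t)p; }   // explicit cast required in C++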

File Contents

/*
 *  vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
 *                 (supports mmap, vm_allocate or fallbacks to malloc)
 *
 *  Basilisk II (C) 1997-2002 Christian Bauer
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
21    
22     #ifdef HAVE_CONFIG_H
23     #include "config.h"
24     #endif
25    
26     // TODO: Win32 VMs ?
27     #include <stdlib.h>
28     #include <string.h>
29     #include "vm_alloc.h"
30    
31     #ifdef HAVE_MACH_VM
32     #ifndef HAVE_MACH_TASK_SELF
33     #ifdef HAVE_TASK_SELF
34     #define mach_task_self task_self
35     #else
36     #error "No task_self(), you lose."
37     #endif
38     #endif
39     #endif
40    
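// With mmap()-based allocation, next_address keeps a linear hint for where to
// try the next mapping, and the block below selects whichever flavour of
// anonymous mapping the configure script detected: MAP_ANON, MAP_ANONYMOUS,
// or, failing both, a private mapping of /dev/zero through zero_fd.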
#ifdef HAVE_MMAP_VM
static char * next_address = 0;
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_PRIVATE | MAP_ANON)
#define zero_fd   -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_PRIVATE | MAP_ANONYMOUS)
#define zero_fd   -1
#else
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#define map_flags (MAP_PRIVATE)
static int zero_fd = -1;
#endif
#endif
#endif

/* Initialize the VM system. Returns 0 if successful, -1 for errors. */

int vm_init(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    zero_fd = open("/dev/zero", O_RDWR);
    if (zero_fd < 0)
        return -1;
#endif
#endif
    return 0;
}

/* Deallocate all internal data used to wrap virtual memory allocators. */

void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
    close(zero_fd);
#endif
#endif
}

/* Allocate zero-filled memory of SIZE bytes. The mapping is private
   and default protection bits are read / write. The return value
   is the actual mapping address chosen or VM_MAP_FAILED for errors. */

void * vm_acquire(size_t size)
{
    void * addr;

#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
        return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
    if ((addr = mmap((caddr_t)next_address, size, VM_PAGE_DEFAULT, map_flags, zero_fd, 0)) == MAP_FAILED)
        return VM_MAP_FAILED;

    next_address = (char *)addr + size;

    // Since I don't know the standard behavior of mmap(), zero-fill here
    if (memset(addr, 0, size) != addr)
        return VM_MAP_FAILED;
#else
    if ((addr = calloc(size, 1)) == 0)
        return VM_MAP_FAILED;

    // Omit changes for protections because they are not supported in this mode
    return addr;
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say MacOS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return VM_MAP_FAILED;

    return addr;
}

/* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */

int vm_acquire_fixed(void * addr, size_t size)
{
#ifdef HAVE_MACH_VM
    // vm_allocate() returns a zero-filled memory region
    if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    if (mmap((caddr_t)addr, size, VM_PAGE_DEFAULT, map_flags | MAP_FIXED, zero_fd, 0) == MAP_FAILED)
        return -1;

    // Since I don't know the standard behavior of mmap(), zero-fill here
    // (memset() returns its destination pointer, so compare against addr, not 0)
    if (memset(addr, 0, size) != addr)
        return -1;
#else
    // Unsupported
    return -1;
#endif
#endif

    // Explicitly protect the newly mapped region here because on some systems,
    // say MacOS X, mmap() doesn't honour the requested protection flags.
    if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
        return -1;

    return 0;
}

/* Deallocate any mapping for the region starting at ADDR and extending
   SIZE bytes. Returns 0 if successful, -1 on errors. */

int vm_release(void * addr, size_t size)
{
    // Safety check: don't try to release memory that was not allocated
    if (addr == VM_MAP_FAILED)
        return 0;

#ifdef HAVE_MACH_VM
    if (vm_deallocate(mach_task_self(), (vm_address_t)addr, size) != KERN_SUCCESS)
        return -1;
#else
#ifdef HAVE_MMAP_VM
    if (munmap((caddr_t)addr, size) != 0)
        return -1;
#else
    free(addr);
#endif
#endif

    return 0;
}

/* Change the memory protection of the region starting at ADDR and
   extending SIZE bytes to PROT. Returns 0 if successful, -1 for errors. */

int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
    int ret_code = vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot);
    return ret_code == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
    int ret_code = mprotect((caddr_t)addr, size, prot);
    return ret_code == 0 ? 0 : -1;
#else
    // Unsupported
    return -1;
#endif
#endif
}

#ifdef CONFIGURE_TEST_VM_MAP
/* Tests covered here:
   - the TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - the TEST_VM_MMAP_ANON* program slices succeed when they could be compiled
*/
int main(void)
{
    vm_init();

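// page_align() rounds an address down to the start of its page; this
// assumes page_size is a power of two.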
#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
    unsigned long page_size = getpagesize();

    const int area_size = 6 * page_size;
    volatile char * area = (volatile char *) vm_acquire(area_size);
    volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_release((char *)area, area_size) < 0)
        return 1;

    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
    if (area == VM_MAP_FAILED)
        return 0;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    if (area == VM_MAP_FAILED)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 1;

    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
        return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
    if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
        return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
    // this should cause a core dump
    char foo = *fault_address;
    return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
    // this should cause a core dump
    *fault_address = 'z';
    return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
    // this should not cause a core dump
    *fault_address = 'z';
    return 0;
#endif
}
#endif
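
For reference, a minimal usage sketch of the wrapper API defined above (not part of the revision shown; the caller name use_vm_wrappers and the 1 MB size are arbitrary, and error paths are kept short for brevity):

    #include "vm_alloc.h"

    int use_vm_wrappers(void)
    {
        if (vm_init() < 0)
            return -1;

        // Acquire a private, zero-filled region at an address of the system's choosing
        void * base = vm_acquire(0x100000);
        if (base == VM_MAP_FAILED)
            return -1;

        // Tighten the protection to read-only, then restore read/write
        if (vm_protect(base, 0x100000, VM_PAGE_READ) < 0 ||
            vm_protect(base, 0x100000, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
            return -1;

        vm_release(base, 0x100000);
        vm_exit();
        return 0;
    }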