ViewVC Help
View File | Revision Log | Show Annotations | Revision Graph | Root Listing
root/cebix/BasiliskII/src/Unix/vm_alloc.cpp
Revision: 1.2
Committed: 2001-07-06T22:37:23Z (23 years, 4 months ago) by cebix
Branch: MAIN
Changes since 1.1: +1 -1 lines
Log Message:
fixed memory leaks in video mode switching

File Contents

# Content
1 /*
2 * vm_alloc.cpp - Wrapper to various virtual memory allocation schemes
3 * (supports mmap, vm_allocate or fallbacks to malloc)
4 *
5 * Basilisk II (C) 1997-2001 Christian Bauer
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 // TODO: Win32 VMs ?
27 #include <stdlib.h>
28 #include <string.h>
29 #include "vm_alloc.h"
30
#ifdef HAVE_MACH_VM
/* Mach VM: some older systems provide task_self() instead of
   mach_task_self(); map the former to the latter so the calls
   below compile on both. */
#ifndef HAVE_MACH_TASK_SELF
#ifdef HAVE_TASK_SELF
#define mach_task_self task_self
#else
#error "No task_self(), you lose."
#endif
#endif
#endif

#ifdef HAVE_MMAP_VM
/* Select mmap() flags for private, zero-filled mappings.  BSD spells
   the anonymous-mapping flag MAP_ANON, Linux/SysV spell it
   MAP_ANONYMOUS; if neither is available we map /dev/zero instead,
   whose descriptor is opened in vm_init().  Note that zero_fd is a
   macro (-1) in the anonymous cases but a real variable in the
   /dev/zero fallback — the "#ifndef zero_fd" tests in vm_init() and
   vm_exit() rely on exactly this distinction. */
#ifdef HAVE_MMAP_ANON
#define map_flags (MAP_PRIVATE | MAP_ANON)
#define zero_fd -1
#else
#ifdef HAVE_MMAP_ANONYMOUS
#define map_flags (MAP_PRIVATE | MAP_ANONYMOUS)
#define zero_fd -1
#else
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#define map_flags (MAP_PRIVATE)
static int zero_fd = -1;
#endif
#endif
#endif
58
59 /* Initialize the VM system. Returns 0 if successful, -1 for errors. */
60
int vm_init(void)
{
	/* Only the mmap()-over-/dev/zero configuration needs any setup:
	   there, zero_fd is a real variable (not a macro), and we must
	   open the zero device before the first mapping is made. */
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	if ((zero_fd = open("/dev/zero", O_RDWR)) < 0)
		return -1;
#endif
#endif
	return 0;
}
72
73 /* Deallocate all internal data used to wrap virtual memory allocators. */
74
/* Release the /dev/zero descriptor if vm_init() opened one.
   Fix: only close a descriptor that was actually opened (zero_fd
   starts out as -1 and stays -1 if vm_init() was never called or
   failed), and reset it afterwards so a repeated vm_exit() cannot
   close an unrelated descriptor that happened to reuse the number. */
void vm_exit(void)
{
#ifdef HAVE_MMAP_VM
#ifndef zero_fd
	if (zero_fd >= 0) {
		close(zero_fd);
		zero_fd = -1;
	}
#endif
#endif
}
83
84 /* Allocate zero-filled memory of SIZE bytes. The mapping is private
85 and default protection bits are read / write. The return value
86 is the actual mapping address chosen or VM_MAP_FAILED for errors. */
87
void * vm_acquire(size_t size)
{
	void * addr;

#ifdef HAVE_MACH_VM
	// vm_allocate() returns a zero-filled memory region
	// (anywhere = TRUE lets the kernel choose the address)
	if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, TRUE) != KERN_SUCCESS)
		return VM_MAP_FAILED;
#else
#ifdef HAVE_MMAP_VM
	// zero_fd is -1 for anonymous mappings, otherwise the /dev/zero
	// descriptor opened by vm_init()
	if ((addr = mmap(0, size, VM_PAGE_DEFAULT, map_flags, zero_fd, 0)) == MAP_FAILED)
		return VM_MAP_FAILED;

	// Since I don't know the standard behavior of mmap(), zero-fill here
	// NOTE(review): anonymous and /dev/zero mappings are already
	// zero-filled on conforming systems, so this is belt-and-braces;
	// also, memset() always returns its first argument, so this test
	// can never actually fail.
	if (memset(addr, 0, size) != addr)
		return VM_MAP_FAILED;
#else
	// malloc() fallback: calloc() hands back zero-filled memory
	if ((addr = calloc(size, 1)) == 0)
		return VM_MAP_FAILED;

	// Omit changes for protections because they are not supported in this mode
	return addr;
#endif
#endif

	// Explicitly protect the newly mapped region here because on some systems,
	// say MacOS X, mmap() doesn't honour the requested protection flags.
	if (vm_protect(addr, size, VM_PAGE_DEFAULT) != 0)
		return VM_MAP_FAILED;

	return addr;
}
120
121 /* Allocate zero-filled memory at exactly ADDR (which must be page-aligned).
   Returns 0 if successful, -1 on errors. */
123
124 int vm_acquire_fixed(void * addr, size_t size)
125 {
126 #ifdef HAVE_MACH_VM
127 // vm_allocate() returns a zero-filled memory region
128 if (vm_allocate(mach_task_self(), (vm_address_t *)&addr, size, 0) != KERN_SUCCESS)
129 return -1;
130 #else
131 #ifdef HAVE_MMAP_VM
132 if (mmap(addr, size, VM_PAGE_DEFAULT, map_flags | MAP_FIXED, zero_fd, 0) == MAP_FAILED)
133 return -1;
134
135 // Since I don't know the standard behavior of mmap(), zero-fill here
136 if (memset(0, 0, size) != 0)
137 return -1;
138 #else
139 // Unsupported
140 return -1;
141 #endif
142 #endif
143
144 // Explicitely protect the newly mapped region here because on some systems,
145 // say MacOS X, mmap() doesn't honour the requested protection flags.
146 if (vm_protect(0, size, VM_PAGE_DEFAULT) != 0)
147 return -1;
148
149 return 0;
150 }
151
/* Deallocate any mapping for the region starting at ADDR and extending
   SIZE bytes. Returns 0 if successful, -1 on errors. */
154
int vm_release(void * addr, size_t size)
{
	/* Undo whatever vm_acquire()/vm_acquire_fixed() set up for this
	   region, using the matching deallocator for the active scheme. */
#ifdef HAVE_MACH_VM
	return vm_deallocate(mach_task_self(), (vm_address_t)addr, size) == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	return munmap(addr, size) == 0 ? 0 : -1;
#else
	// malloc() fallback — free() cannot fail, and free(NULL) is a no-op
	free(addr);
	return 0;
#endif
#endif
}
170
/* Change the memory protection of the region starting at ADDR and
   extending SIZE bytes to PROT. Returns 0 if successful, -1 for errors. */
173
int vm_protect(void * addr, size_t size, int prot)
{
#ifdef HAVE_MACH_VM
	// Calls the Mach kernel's vm_protect() (task-argument form),
	// with set_maximum = 0.
	return vm_protect(mach_task_self(), (vm_address_t)addr, size, 0, prot) == KERN_SUCCESS ? 0 : -1;
#else
#ifdef HAVE_MMAP_VM
	return mprotect(addr, size, prot) == 0 ? 0 : -1;
#else
	// Protection changes are not supported in the malloc() fallback
	return -1;
#endif
#endif
}
189
190 #ifdef CONFIGURE_TEST_VM_MAP
/* Tests covered here:
   - TEST_VM_PROT_* program slices succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int main(void)
{
	vm_init();

	// Round an address down to the start of its page
#define page_align(address) ((char *)((unsigned long)(address) & -page_size))
	unsigned long page_size = getpagesize();

	// Six pages, with the probe address in the middle of the fourth page
	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

	// Exactly one TEST_VM_* macro is defined per configure-time run;
	// each slice below is a self-contained probe program.
#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	// Probe: anonymous mmap works at all (exit 0 = supported)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;

	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	// Returning 0 here means "could not even set up the experiment";
	// the interesting result is the core dump further down.
	if (area == VM_MAP_FAILED)
		return 0;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// Probe: protection can be dropped to read-only and restored
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;

	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
258 #endif