chunk.c - vx32 - Local 9vx git repository for patches.
(HTM) git clone git://r-36.net/vx32
(DIR) Log
(DIR) Files
(DIR) Refs
---
chunk.c (6053B)
---
1 #define _XOPEN_SOURCE 500
2 #define _GNU_SOURCE // for MAP_32BIT
3
4 #include <stdio.h>
5 #include <unistd.h>
6 #include <fcntl.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <sys/mman.h>
10 #include <errno.h>
11 #include <assert.h>
12
13 #include "vx32.h"
14 #include "vx32impl.h"
15
16 extern int vx_elfbigmem;
17
18 typedef struct vxmem_chunk vxmem_chunk;
19
// A vxmem backed by an (anonymous, unlinked) temporary file: guest memory
// contents live in the file — accessed via pread/pwrite or via an mmap of
// the whole file — while guest page permissions are tracked in a side array.
struct vxmem_chunk {
	vxmem mem;	// embedded generic interface; must stay first (code casts vxmem* <-> vxmem_chunk*)
	int fd;		// file descriptor holding the chunk's contents
	off_t size;	// current chunk size in bytes
	uint8_t *perm;	// one VXPERM_* byte per page; VXPAGEROUND(size)/VXPAGESIZE entries
};
26
27 static void chunk_free(vxmem *mem)
28 {
29 vxmem_chunk *chunk = (vxmem_chunk*)mem;
30
31 if(mem->mapped){
32 vxmem_unmap(mem, mem->mapped);
33 mem->mapped = NULL;
34 }
35
36 free(chunk->perm);
37 close(chunk->fd);
38 free(chunk);
39 }
40
41 static int chunk_resize(vxmem *mem, size_t size)
42 {
43 vxmem_chunk *chunk = (vxmem_chunk*)mem;
44 uint8_t *perm;
45 uint32_t onpage, npage;
46
47 if(size == chunk->size)
48 return 0;
49
50 if(mem->mapped){
51 assert(!vx_elfbigmem);
52 vxmem_unmap(mem, mem->mapped);
53 mem->mapped = NULL;
54 }
55
56 if(ftruncate(chunk->fd, size) < 0)
57 return -1;
58 onpage = VXPAGEROUND(chunk->size) / VXPAGESIZE;
59 npage = VXPAGEROUND(size) / VXPAGESIZE;
60 perm = realloc(chunk->perm, npage);
61 if (perm == NULL)
62 return -1;
63 if(npage > onpage)
64 memset(perm + onpage, 0, npage-onpage);
65 chunk->perm = perm;
66 chunk->size = size;
67 return 0;
68 }
69
70 vxmem *vxmem_chunk_copy(vxmem *mem)
71 {
72 vxmem_chunk *chunk = (vxmem_chunk*)mem;
73 uint8_t *perm;
74 uint32_t i, npage;
75
76 assert(mem->free == chunk_free);
77 vxmem *nmem = vxmem_chunk_new(chunk->size);
78 if (nmem == NULL)
79 return NULL;
80 npage = VXPAGEROUND(chunk->size) / VXPAGESIZE;
81 int n = 0;
82 vxmem_chunk *nchunk = (vxmem_chunk*)nmem;
83 char *buf = malloc(4096);
84 memmove(nchunk->perm, chunk->perm, npage);
85 for (i=0; i<npage; i++) {
86 if (nchunk->perm[i]) {
87 vxmem_read(mem, buf, i*VXPAGESIZE, VXPAGESIZE);
88 vxmem_write(nmem, buf, i*VXPAGESIZE, VXPAGESIZE);
89 n++;
90 }
91 }
92 free(buf);
93 return nmem;
94 }
95
96 static ssize_t chunk_read(vxmem *mem, void *data, uint32_t addr, uint32_t len)
97 {
98 vxmem_chunk *chunk = (vxmem_chunk*)mem;
99
100 if (addr >= chunk->size)
101 return -1;
102 if (len > chunk->size - addr)
103 len = chunk->size - addr;
104 return pread(chunk->fd, data, len, addr);
105 }
106
107 static ssize_t chunk_write(vxmem *mem, const void *data, uint32_t addr, uint32_t len)
108 {
109 vxmem_chunk *chunk = (vxmem_chunk*)mem;
110
111 if (addr >= chunk->size)
112 return -1;
113 if (len > chunk->size - addr)
114 len = chunk->size - addr;
115 return pwrite(chunk->fd, data, len, addr);
116 }
117
118 static int chunk_checkperm(vxmem *mem, uint32_t addr, uint32_t len, uint32_t perm, uint32_t *out_faultva)
119 {
120 uint32_t pn, pe, cpe, va;
121 vxmem_chunk *chunk = (vxmem_chunk*)mem;
122
123 if (addr + len < addr)
124 return 0;
125 if (len == 0)
126 return 1;
127 pn = addr / VXPAGESIZE;
128 pe = VXPAGEROUND(addr + len-1) / VXPAGESIZE;
129 cpe = VXPAGEROUND(chunk->size) / VXPAGESIZE;
130 for (; pn < pe; pn++) {
131 if (pn >= cpe) {
132 if (out_faultva)
133 *out_faultva = chunk->size;
134 return 0;
135 }
136 if ((chunk->perm[pn] & perm) != perm) {
137 if (out_faultva) {
138 va = pn * VXPAGESIZE;
139 if (va < addr)
140 va = addr;
141 *out_faultva = va;
142 }
143 return 0;
144 }
145 }
146 return 1;
147 }
148
149 static int mmperm(int perm)
150 {
151 int m;
152
153 if (perm == 0)
154 m = PROT_NONE;
155 else {
156 m = 0;
157 if (perm & VXPERM_READ)
158 m |= PROT_READ;
159 if (perm & VXPERM_WRITE)
160 m |= PROT_WRITE;
161 if (perm & VXPERM_EXEC)
162 m |= PROT_EXEC;
163 }
164 return m;
165 }
166
167 static int chunk_setperm(vxmem *mem, uint32_t addr, uint32_t len, uint32_t perm)
168 {
169 vxmem_chunk *chunk = (vxmem_chunk*)mem;
170 uint32_t a;
171
172 if (addr + len < addr || addr + len > chunk->size) {
173 errno = EINVAL;
174 return -1;
175 }
176
177 if (mem->mapped) {
178 if (mprotect(mem->mapped->base + addr, len, mmperm(perm)) < 0)
179 return -1;
180 }
181
182 for (a = addr; a < addr + len; a += VXPAGESIZE)
183 chunk->perm[a/VXPAGESIZE] = perm;
184 return 0;
185 }
186
// Map the whole chunk into host memory and return the (reference-counted)
// mapping, reusing an existing one when present.  The range is first
// reserved PROT_NONE, then per-page guest permissions are applied with
// merged mprotect calls.  Returns NULL on failure (nothing mapped/leaked).
// NOTE(review): the flags parameter is ignored here — confirm against the
// vxmem interface contract in vx32.h.
static vxmmap *chunk_map(vxmem *mem, uint32_t flags)
{
	vxmmap *mm;
	void *v;
	vxmem_chunk *chunk = (vxmem_chunk*)mem;

	// An existing mapping is shared; callers balance with vxmem_unmap.
	if (mem->mapped) {
		mem->mapped->ref++; // XXX get rid of ref?
		return mem->mapped;
	}

	mm = malloc(sizeof *mm);
	if (mm == NULL)
		return NULL;
	// Reserve the whole chunk with no access; MAP_32BIT keeps the mapping
	// in the low 4GB so 32-bit guest addresses can reach it.  Big-memory
	// mode uses a private (copy-on-write) mapping, otherwise changes are
	// shared with the backing file so pread/pwrite stay coherent.
	if ((v = mmap(0, chunk->size, PROT_NONE, MAP_32BIT | (vx_elfbigmem ? MAP_PRIVATE : MAP_SHARED), chunk->fd, 0)) == (void*)-1) {
		free(mm);
		return NULL;
	}
	// vxprint("chunk_map %p size %08x\n", v, chunk->size);

	// Now set the permissions on all pages that should be accessible.
	unsigned npages = chunk->size / VXPAGESIZE;
	for (unsigned i = 0; i < npages; ) {
		uint8_t pageperm = chunk->perm[i];

		// For efficiency, merge mprotect calls on page ranges.
		unsigned j;
		for (j = i+1; j < npages; j++)
			if (chunk->perm[j] != pageperm)
				break;

		if (pageperm == 0) {
			i = j; // nothing to do for this range
			continue;
		}

		// Calculate the effective permission to map with in the host.
		int prot = mmperm(pageperm);
		assert(prot >= 0);

		// Set the permissions on this range.
		if (mprotect((char*)v + i*VXPAGESIZE, (j-i)*VXPAGESIZE, prot) < 0) {
			// Undo the reservation; the chunk itself is unaffected.
			munmap(v, chunk->size);
			free(mm);
			return NULL;
		}

		i = j;
	}

	mm->base = v;
	mm->size = chunk->size;
	mm->ref = 2; // XXX get rid of ref?
	mem->mapped = mm;
	return mm;
}
243
244 static void chunk_unmap(vxmem *mem, vxmmap *mm)
245 {
246 vxmem_chunk *chunk = (vxmem_chunk*)mem;
247
248 if(mm == NULL)
249 return;
250 // if(--mm->ref > 0) // XXX get rid of ref?
251 // return;
252 if(mm == mem->mapped)
253 mem->mapped = NULL;
254 // vxprint("chunk_unmap %p size %08x\n", mm->base, mm->size);
255 munmap(mm->base, mm->size);
256 free(mm);
257 }
258
// Method table for chunk-backed vxmem objects; vxmem_chunk_fromfd copies
// it into each new chunk's embedded vxmem.
// NOTE(review): positional initializer — the order must match the field
// order of struct vxmem in vx32.h; re-verify if that struct changes.
static vxmem chunk_proto =
{
	chunk_read,
	chunk_write,
	chunk_map,
	chunk_unmap,
	chunk_checkperm,
	chunk_setperm,
	chunk_resize,
	chunk_free,
};
270
271 static vxmem *vxmem_chunk_fromfd(int fd, off_t size)
272 {
273 vxmem_chunk *chunk;
274
275 chunk = calloc(sizeof *chunk, 1);
276 if (chunk == NULL)
277 return NULL;
278 chunk->mem = chunk_proto;
279 chunk->fd = fd;
280 if (chunk_resize((vxmem*)chunk, size) < 0) {
281 free(chunk);
282 return NULL;
283 }
284 return (vxmem*)chunk;
285 }
286
287 vxmem *vxmem_chunk_new(int size)
288 {
289 int fd;
290 char tmpfn[] = "/var/tmp/vxXXXXXX";
291 vxmem *mem;
292
293 if ((fd = mkstemp(tmpfn)) < 0)
294 return NULL;
295 unlink(tmpfn);
296 if ((mem = vxmem_chunk_fromfd(fd, size)) == NULL) {
297 close(fd);
298 return NULL;
299 }
300 return mem;
301 }
302