vx32.c - vx32 - Local 9vx git repository for patches.
(HTM) git clone git://r-36.net/vx32
(DIR) Log
(DIR) Files
(DIR) Refs
---
vx32.c (6453B)
---
1 /*
2 * User-space execution.
3 *
4 * On a real x86 processor, Plan 9 calls touser to execute an IRET
5 * instruction to go back into user space, and then eventually a trap
6 * happens and the processor is magically transported back into
7 * kernel space running trap or syscall.
8 *
9 * In Plan 9 VX, touser calls into vx32 to manage user execution;
10 * vx32 eventually returns a trap code and then touser dispatches
11 * to trap or syscall as appropriate. When trap or syscall returns,
12 * touser calls back into vx32 and the cycle repeats.
13 */
14
15 #define WANT_M
16
17 #include "u.h"
18 #include <pthread.h>
19 #include <sys/mman.h>
20 #include "lib.h"
21 #include "mem.h"
22 #include "dat.h"
23 #include "fns.h"
24 #include "error.h"
25 #include "ureg.h"
26
enum {
	ClockTicks = 1,		/* ticks to charge per timer interrupt — NOTE(review): confirm against clock code */
	ClockMillis = 25,	/* virtual-time slice handed to vx32 before interruption (ms) */
};

int nfaults;		/* count of user page faults taken (see touser) */
int traceprocs;		/* when set, trace user-mode entry/exit; toggled by vx32sysr1 */
int tracesyscalls;	/* when set, trace system calls; toggled by vx32sysr1 */
extern int abortonfault;	/* abort() on unhandled user fault (debugging aid) */
extern char *sysctab[];
extern void mathemu(Ureg*, void*);

static void proc2ureg(vxproc*, Ureg*);
static void ureg2proc(Ureg*, vxproc*);

/* Template hook table; copied into each proc's pmmu by vxnewproc. */
static vxmem thevxmem;
43
44 void
45 vx32sysr1(void)
46 {
47 traceprocs = !traceprocs;
48 // vx32_debugxlate = traceprocs;
49 tracesyscalls = !tracesyscalls;
50 }
51
52 /*
53 * Vx32 hooks to read, write, map, unmap, and check permissions
54 * on user memory. Normally these are more involved, but we're
55 * using the processor to do everything.
56 */
/*
 * Read len bytes of user memory at guest address addr into data.
 * The guest address space is mapped contiguously at mapped->base,
 * so this is a plain copy; permission/range checks happen in
 * vmcheckperm, and real faults are handled as they occur.
 * Always reports len bytes transferred.
 */
static ssize_t
vmread(vxmem *vm, void *data, uint32_t addr, uint32_t len)
{
	memmove(data, vm->mapped->base+addr, len);
	return len;
}
63
/*
 * Write len bytes from data into user memory at guest address addr.
 * Mirror image of vmread: straight copy into the direct mapping,
 * always reporting len bytes transferred.
 */
static ssize_t
vmwrite(vxmem *vm, const void *data, uint32_t addr, uint32_t len)
{
	memmove(vm->mapped->base+addr, data, len);
	return len;
}
70
/*
 * Return the mapping of user memory.  The mapping is permanent
 * (set up once in vxnewproc), so flags is ignored and no
 * reference counting is needed.
 */
static vxmmap*
vmmap(vxmem *vm, uint32_t flags)
{
	return vm->mapped;
}
76
/* Nothing to do: vmmap hands out a permanent mapping. */
static void
vmunmap(vxmem *vm, vxmmap *mm)
{
}
81
82 static int
83 vmcheckperm(vxmem *vm, uint32_t addr, uint32_t len, uint32_t perm, uint32_t *out_faultva)
84 {
85 if(addr >= USTKTOP){
86 *out_faultva = addr;
87 return 0;
88 }
89 if(addr+len < addr || addr +len > USTKTOP){
90 *out_faultva = USTKTOP;
91 return 0;
92 }
93 /* All is allowed - handle faults as they happen. */
94 return 1;
95 }
96
/*
 * Changing page permissions is unsupported in this configuration;
 * report failure unconditionally.
 */
static int
vmsetperm(vxmem *vm, uint32_t addr, uint32_t len, uint32_t perm)
{
	return 0;
}
102
/*
 * Resizing the address space is unsupported; report failure
 * unconditionally.
 */
static int
vmresize(vxmem *vm, size_t size)
{
	return 0;
}
108
/* Nothing to free: the vxmem lives inside the proc's PMMU. */
static void
vmfree(vxmem *vm)
{
}
113
/*
 * Template hook table handed to vx32.  vxnewproc copies it into
 * each proc's pmmu and then points the copy's mapped field at the
 * proc's own vxmm.  Positional initializer: order must match the
 * vxmem struct declaration (read, write, map, unmap, checkperm,
 * setperm, resize, free — NOTE(review): confirm against vx32.h).
 */
static vxmem thevxmem =
{
	vmread,
	vmwrite,
	vmmap,
	vmunmap,
	vmcheckperm,
	vmsetperm,
	vmresize,
	vmfree,
};
125
126 /*
127 * Vxnewproc is called at the end of newproc
128 * to fill in vx32-specific entries in the Proc struct
129 * before it gets used.
130 */
void
vxnewproc(Proc *p)
{
	PMMU *pm;

	pm = &p->pmmu;

	/*
	 * Kernel procs don't need vxprocs; if this proc
	 * already has one, take it away. Also, give
	 * kernel procs very large stacks so they can call
	 * into non-thread-friendly routines like x11
	 * and getgrgid.
	 */
	if(p->kp){
		if(pm->vxproc){
			// vxunmap(p);
			assert(pm->uzero == nil);
			/* Detach our hook table first so vxproc_free
			 * doesn't call back into it. */
			pm->vxproc->mem = nil;
			vxproc_free(pm->vxproc);
			pm->vxproc = nil;
		}
		free(p->kstack);
		p->kstack = nil;
		p->kstack = smalloc(512*1024);	/* oversized kernel stack, replaces the default */
		return;
	}

	/*
	 * User proc: allocate a vxproc on first use and wire up a
	 * per-proc copy of the shared hook table, pointing its
	 * mapped field at this proc's mapping descriptor.
	 */
	if(pm->vxproc == nil){
		pm->vxproc = vxproc_alloc();
		if(pm->vxproc == nil)
			panic("vxproc_alloc");
		pm->vxproc->mem = &pm->vxmem;
		pm->vxmem = thevxmem;		/* struct copy of the template */
		pm->vxmem.mapped = &pm->vxmm;
		memset(&pm->vxmm, 0, sizeof pm->vxmm);
	}
}
169
170 static void
171 setclock(int start)
172 {
173 struct itimerval itv;
174
175 /* Ask for clock tick to interrupt execution after ClockMillis ms. */
176 memset(&itv, 0, sizeof itv);
177 if(start)
178 itv.it_value.tv_usec = ClockMillis*1000;
179 else
180 itv.it_value.tv_usec = 0;
181 setitimer(ITIMER_VIRTUAL, &itv, 0);
182 }
183
184 /*
185 * Newly forked processes start executing at forkret.
186 * The very first process, init, starts executing at touser(sp),
187 * where sp is its stack pointer.
188 */
void
forkret(void)
{
	extern void kexit(Ureg*);

	/* nil Ureg: the new proc has no trap frame of its own yet. */
	kexit(nil);
	/* initsp == 0: resume from the Ureg saved atop the kernel stack. */
	touser(0);
}
197
/*
 * Run the current proc in user mode via vx32, dispatching traps
 * and system calls back into the kernel until the proc dies.
 * initsp != 0 only for the very first proc (init): it gets a
 * zeroed register set with that stack pointer.  Everyone else
 * resumes from the Ureg saved at the top of the kernel stack.
 * Never returns.
 */
void
touser(void *initsp)
{
	int rc;
	void *kp;
	vxproc *vp;
	Ureg u;
	Ureg *u1;
	uchar *addr;

	vp = up->pmmu.vxproc;
	if(initsp){
		/* init: clear register set, setup sp, eip */
		memset(vp->cpu, 0, sizeof *vp->cpu);
		vp->cpu->reg[ESP] = (ulong)initsp;
		vp->cpu->eflags = 0;
		vp->cpu->eip = UTZERO+32;	/* entry 32 bytes past text base — NOTE(review): presumably skips the a.out header; confirm */
	}else{
		/* anyone else: registers are sitting at top of kernel stack */
		kp = (char*)up->kstack + KSTACK - (sizeof(Ureg) + 2*BY2WD);
		u1 = (Ureg*)((char*)kp + 2*BY2WD);	/* skip the two words below the Ureg */
		ureg2proc(u1, vp);
	}

	/*
	 * User-mode execution loop.
	 */
	for(;;){
		/*
		 * Optimization: try to fault in code page and stack
		 * page right now, since we're likely to need them.
		 */
		if(up->pmmu.us->hi == 0){
			fault(vp->cpu->eip, 1);
			fault(vp->cpu->reg[ESP], 0);
		}

		/*
		 * Let vx32 know whether to allow floating point.
		 * TODO: Fix vx32 so that you don't need to flush
		 * on the transition from FPinactive -> FPactive.
		 */
		if(vp->allowfp != (up->fpstate == FPactive)){
			vp->allowfp = (up->fpstate == FPactive);
			vxproc_flush(vp);	/* discard translated code so the change takes effect */
		}

		if(traceprocs)
			iprint("+vx32 %p %p %s eip=%lux esp=%lux\n",
				m, up, up->text, vp->cpu->eip, vp->cpu->reg[ESP]);

		/*
		 * SIGSEGV handling and the virtual-time clock are enabled
		 * only while vx32 is actually executing user code.
		 */
		setsigsegv(1);
		setclock(1);
		rc = vxproc_run(vp);
		setclock(0);
		setsigsegv(0);

		if(rc < 0)
			panic("vxproc_run: %r");

		if(traceprocs)
			iprint("-vx32 %p %p %s eip=%lux esp=%lux rc=%#x\n",
				m, up, up->text, vp->cpu->eip, vp->cpu->reg[ESP], rc);

		/*
		 * Handle page faults quickly, without proc2ureg, ureg2proc,
		 * if possible. Otherwise fall back to default trap call.
		 */
		if(rc == VXTRAP_PAGEFAULT){
			int read;
			nfaults++;
			/* bit 1 of the x86 page-fault error code is set for writes */
			read = !(vp->cpu->traperr & 2);
			addr = (uchar*)(uintptr)vp->cpu->trapva;
			if(traceprocs)
				print("fault %p read=%d\n", addr, read);
			/* addr - uzero converts the host address to a user virtual address */
			if(isuaddr(addr) && fault(addr - up->pmmu.uzero, read) >= 0)
				continue;
			print("%ld %s: unhandled fault va=%lux [%lux] eip=%lux\n",
				up->pid, up->text,
				addr - up->pmmu.uzero, vp->cpu->trapva, vp->cpu->eip);
			proc2ureg(vp, &u);
			dumpregs(&u);
			if(abortonfault)
				abort();
		}

		/* Slow path: marshal full register state and let trap()
		 * dispatch (syscall, clock tick, etc.), then restore. */
		up->dbgreg = &u;
		proc2ureg(vp, &u);
		u.trap = rc;
		trap(&u);
		ureg2proc(&u, vp);
	}
}
291
/*
 * Marshal the vx32 cpu state into a Ureg for trap/syscall.
 * Only pc and the seven general registers are transferred;
 * all other Ureg fields (segments, flags, trap, ecode, ...)
 * are zeroed here and filled in by the caller as needed.
 */
static void
proc2ureg(vxproc *vp, Ureg *u)
{
	memset(u, 0, sizeof *u);
	u->pc = vp->cpu->eip;
	u->ax = vp->cpu->reg[EAX];
	u->bx = vp->cpu->reg[EBX];
	u->cx = vp->cpu->reg[ECX];
	u->dx = vp->cpu->reg[EDX];
	u->si = vp->cpu->reg[ESI];
	u->di = vp->cpu->reg[EDI];
	u->usp = vp->cpu->reg[ESP];
}
305
/*
 * Inverse of proc2ureg: copy a Ureg back into the vx32 cpu
 * state before resuming user execution.  eflags is not
 * transferred in either direction.
 */
static void
ureg2proc(Ureg *u, vxproc *vp)
{
	vp->cpu->eip = u->pc;
	vp->cpu->reg[EAX] = u->ax;
	vp->cpu->reg[EBX] = u->bx;
	vp->cpu->reg[ECX] = u->cx;
	vp->cpu->reg[EDX] = u->dx;
	vp->cpu->reg[ESI] = u->si;
	vp->cpu->reg[EDI] = u->di;
	vp->cpu->reg[ESP] = u->usp;
}
318