fault.c - vx32 - Local 9vx git repository for patches.
(HTM) git clone git://r-36.net/vx32
(DIR) Log
(DIR) Files
(DIR) Refs
---
fault.c (8283B)
---
1 #define WANT_M
2
3 #include "u.h"
4 #include "lib.h"
5 #include "mem.h"
6 #include "dat.h"
7 #include "fns.h"
8 #include "error.h"
9
10 int
11 fault(ulong addr, int read)
12 {
13 Segment *s;
14 char *sps;
15
16 if(up == nil)
17 panic("fault: nil up");
18 if(up->nlocks.ref)
19 print("fault: nlocks %ld\n", up->nlocks.ref);
20
21 sps = up->psstate;
22 up->psstate = "Fault";
23 spllo();
24
25 m->pfault++;
26 for(;;) {
27 s = seg(up, addr, 1); /* leaves s->lk qlocked if seg != nil */
28 if(s == 0) {
29 iprint("%ld %s fault %#x no segment\n", up->pid, up->text, addr);
30 { Segment **s, **et, *n;
31
32 et = &up->seg[NSEG];
33 for(s = up->seg; s < et; s++) {
34 n = *s;
35 if(n == 0)
36 continue;
37 print("segment %#lux %#lux\n", n->base, n->top);
38 }
39 }
40 up->psstate = sps;
41 return -1;
42 }
43
44 if(!read && (s->type&SG_RONLY)) {
45 qunlock(&s->lk);
46 iprint("%ld %s fault %#x write in read-only\n", up->pid, up->text, addr);
47 up->psstate = sps;
48 return -1;
49 }
50
51 if(fixfault(s, addr, read, 1) == 0)
52 break;
53 }
54
55 up->psstate = sps;
56 return 0;
57 }
58
59 static void
60 faulterror(char *s, Chan *c, int freemem)
61 {
62 char buf[ERRMAX];
63
64 if(c && c->path){
65 snprint(buf, sizeof buf, "%s accessing %s: %s", s, c->path->s, up->errstr);
66 s = buf;
67 }
68 if(up->nerrlab) {
69 postnote(up, 1, s, NDebug);
70 error(s);
71 }
72 pexit(s, freemem);
73 }
74
/*
 * Debugging hook: when addr2check faults in an SG_PHYSICAL segment,
 * fixfault calls *checkaddr with the address, segment and page.
 */
void (*checkaddr)(ulong, Segment *, Page *);
ulong addr2check;
77
/*
 * Resolve a fault on addr in segment s.  Called with s->lk qlocked;
 * the lock is always released before returning.  Returns 0 on success
 * (updating the mmu when doputmmu is set), -1 when newpage(...,&s,...)
 * failed and cleared s — presumably newpage also released the lock on
 * that path; TODO confirm against newpage.
 */
int
fixfault(Segment *s, ulong addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	ulong mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, ulong);

	/* operate on whole pages */
	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	/* keep the Pte's first/last bounds covering this slot */
	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);	/* may drop and retake s->lk */

		/* text pages are mapped read-only */
		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 * It's only possible to copy on write if
		 * we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref.ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(&lkp->lk);

		/* include swap references so shared pages get copied, not reused */
		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref > 1) {
			unlock(&lkp->lk);

			if(swapfull()){
				qunlock(&s->lk);
				pprint("swap space full\n");
				faulterror(Enoswap, nil, 1);
			}

			/* copy on write: give this process a private copy */
			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		else {
			/* save a copy of the original for the image cache */
			if(lkp->image && !swapfull())
				duppage(lkp);

			unlock(&lkp->lk);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			/* let the physical segment allocate, or synthesize a Page */
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
		}

		/* debugging hook; see checkaddr/addr2check */
		if (checkaddr && addr == addr2check)
			(*checkaddr)(addr, s, *pg);
		mmuphys = PPN((*pg)->pa) |PTEWRITE|PTEUNCACHED|PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;
	}
	qunlock(&s->lk);

	if(doputmmu)
		putmmu(addr, mmuphys, *pg);

	return 0;
}
204
/*
 * Page in the page at addr (segment offset soff) into *p.
 * Called with s->lk qlocked; the lock is dropped around the device
 * read and reacquired before returning, so *p must be re-examined
 * for races.  *p == 0 means demand-load from the segment's image;
 * otherwise *p is a swap record and the page comes from swap.
 */
void
pio(Segment *s, ulong addr, ulong soff, Page **p)
{
	Page *new;
	KMap *k;
	Chan *c;
	int n, ask;
	char *kaddr;
	ulong daddr;
	Page *loadrec;

retry:
	loadrec = *p;
	if(loadrec == 0) {	/* from a text/data image */
		daddr = s->fstart+soff;
		/* the page may still be sitting in the image cache */
		new = lookpage(s->image, daddr);
		if(new != nil) {
			*p = new;
			return;
		}
	}
	else {	/* from a swap image */
		daddr = swapaddr(loadrec);
		new = lookpage(&swapimage, daddr);
		if(new != nil) {
			putswap(loadrec);
			*p = new;
			return;
		}
	}


	qunlock(&s->lk);

	new = newpage(0, 0, addr);
	k = kmap(new);
	kaddr = (char*)VA(k);

	if(loadrec == 0) {	/* This is demand load */
		c = s->image->c;
		while(waserror()) {
			/* retry interrupted reads; any other error is fatal */
			if(strcmp(up->errstr, Eintr) == 0)
				continue;
			kunmap(k);
			putpage(new);
			faulterror("sys: demand load I/O error", c, 0);
		}

		/* NOTE(review): if soff > s->flen the ulong difference wraps and
		 * the int ask goes negative — presumably callers never fault
		 * beyond flen; verify against fixfault. */
		ask = s->flen-soff;
		if(ask > BY2PG)
			ask = BY2PG;

		n = devtab[c->type]->read(c, kaddr, ask, daddr);
		if(n != ask)
			faulterror(Eioload, c, 0);
		/* zero the tail beyond the file contents */
		if(ask < BY2PG)
			memset(kaddr+ask, 0, BY2PG-ask);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 * race, another proc may have gotten here first while
		 * s->lk was unlocked
		 */
		if(*p == 0) {
			new->daddr = daddr;
			cachepage(new, s->image);
			*p = new;
		}
		else
			putpage(new);
	}
	else {	/* This is paged out */
		c = swapimage.c;
		if(waserror()) {
			kunmap(k);
			putpage(new);
			/* lock/unlock to serialize with other fault handlers */
			qlock(&s->lk);
			qunlock(&s->lk);
			faulterror("sys: page in I/O error", c, 0);
		}

		n = devtab[c->type]->read(c, kaddr, BY2PG, daddr);
		if(n != BY2PG)
			faulterror(Eioload, c, 0);

		poperror();
		kunmap(k);
		qlock(&s->lk);

		/*
		 * race, another proc may have gotten here first
		 * (and the pager may have run on that page) while
		 * s->lk was unlocked
		 */
		if(*p != loadrec){
			if(!pagedout(*p)){
				/* another process did it for me */
				putpage(new);
				goto done;
			} else {
				/* another process and the pager got in */
				putpage(new);
				goto retry;
			}
		}

		new->daddr = daddr;
		cachepage(new, &swapimage);
		*p = new;
		putswap(loadrec);
	}

done:;
}
322
323 /*
324 * Called only in a system call
325 */
326 void*
327 okaddr(ulong addr, ulong len, int write)
328 {
329 Segment *s;
330 ulong addr0;
331
332 addr0 = addr;
333
334 if((long)len >= 0) {
335 for(;;) {
336 s = seg(up, addr, 1);
337 if(s == 0)
338 break;
339 if(write && (s->type&SG_RONLY)){
340 qunlock(&s->lk);
341 break;
342 }
343
344 if(addr+len > s->top) {
345 len -= s->top - addr;
346 addr = s->top;
347 qunlock(&s->lk);
348 continue;
349 }
350 qunlock(&s->lk);
351 return up->pmmu.uzero+addr0;
352 }
353 }
354 pprint("suicide: invalid address %#lux/%lud in sys call pc=%#lux\n", addr, len, userpc());
355 return 0;
356 }
357
358 void*
359 uvalidaddr(ulong addr, ulong len, int write)
360 {
361 void *v;
362
363 v = okaddr(addr, len, write);
364 if(v == nil)
365 pexit("Suicide", 0);
366
367 // This is a valid address, but the host kernel
368 // might not know that. In case we're going
369 // to pass the address to the host kernel in a
370 // system call, fault in the pages.
371 volatile char *a = v;
372 ulong i;
373 for(i=0; i<len; i+=BY2PG){
374 if(write)
375 a[i] = a[i];
376 else
377 (void)a[i];
378 }
379 if(len > 0){
380 if(write)
381 a[len-1] = a[len-1];
382 else
383 (void)a[len-1];
384 }
385 return v;
386 }
387
388 /*
389 * &s[0] is known to be a valid address.
390 */
391 void*
392 vmemchr(void *s, int c, int n)
393 {
394 int m_;
395 uchar *a;
396 void *t;
397
398 a = s;
399 while(PGROUND((ulong)a) != PGROUND((ulong)a+n-1)){
400 /* spans pages; handle this page */
401 m_ = BY2PG - ((ulong)a & (BY2PG-1));
402 t = memchr(a, c, m_);
403 if(t)
404 return t;
405 a += m_;
406 n -= m_;
407 if(isuaddr(a))
408 uvalidaddr(a-up->pmmu.uzero, 1, 0);
409 }
410
411 /* fits in one page */
412 return memchr((void*)a, c, n);
413 }
414
415 Segment*
416 seg(Proc *p, ulong addr, int dolock)
417 {
418 Segment **s, **et, *n;
419
420 et = &p->seg[NSEG];
421 for(s = p->seg; s < et; s++) {
422 n = *s;
423 if(n == 0)
424 continue;
425 if(addr >= n->base && addr < n->top) {
426 if(dolock == 0)
427 return n;
428
429 qlock(&n->lk);
430 if(addr >= n->base && addr < n->top)
431 return n;
432 qunlock(&n->lk);
433 }
434 }
435
436 return 0;
437 }
438
439 extern void checkmmu(ulong, ulong);
440 void
441 checkpages(void)
442 {
443 int checked;
444 ulong addr, off;
445 Pte *p;
446 Page *pg;
447 Segment **sp, **ep, *s;
448
449 if(up == nil)
450 return;
451
452 checked = 0;
453 for(sp=up->seg, ep=&up->seg[NSEG]; sp<ep; sp++){
454 s = *sp;
455 if(s == nil)
456 continue;
457 qlock(&s->lk);
458 for(addr=s->base; addr<s->top; addr+=BY2PG){
459 off = addr - s->base;
460 p = s->map[off/PTEMAPMEM];
461 if(p == 0)
462 continue;
463 pg = p->pages[(off&(PTEMAPMEM-1))/BY2PG];
464 if(pg == 0 || pagedout(pg))
465 continue;
466 checkmmu(addr, pg->pa);
467 checked++;
468 }
469 qunlock(&s->lk);
470 }
471 print("%ld %s: checked %d page table entries\n", up->pid, up->text, checked);
472 }