page.c - vx32 - Local 9vx git repository for patches.
(HTM) git clone git://r-36.net/vx32
(DIR) Log
(DIR) Files
(DIR) Refs
---
page.c (11260B)
---
1 #include "u.h"
2 #include "lib.h"
3 #include "mem.h"
4 #include "dat.h"
5 #include "fns.h"
6 #include "error.h"
7
/* Map a disk address to its page-cache hash chain; argument parenthesized so expressions work */
#define pghash(daddr) palloc.hash[((daddr)>>PGSHIFT)&(PGHSIZE-1)]
9
10 struct Palloc palloc;
11
/*
 * Build the physical page allocator from the memory banks in
 * palloc.mem: allocate one Page per physical page, link them all
 * into a doubly linked free list, assign cache colors round-robin,
 * set the pager's water marks, and print a memory summary.
 * Called once at boot before any page allocation.
 */
void
pageinit(void)
{
	int color, i, j;
	Page *p;
	Pallocmem *pm;
	ulong m, np, k, vkb, pkb;

	/* Total pages over all banks, to size the Page array. */
	np = 0;
	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		np += pm->npage;
	}
	palloc.pages = xalloc(np*sizeof(Page));
	if(palloc.pages == 0)
		panic("pageinit");

	/*
	 * Chain every page into the free list.  p->prev/p->next are set
	 * as if each page had neighbors on both sides; the dangling ends
	 * are corrected after the loop.
	 */
	color = 0;
	palloc.head = palloc.pages;
	p = palloc.head;
	for(i=0; i<nelem(palloc.mem); i++){
		pm = &palloc.mem[i];
		for(j=0; j<pm->npage; j++){
			p->prev = p-1;
			p->next = p+1;
			p->pa = pm->base+j*BY2PG;
			p->color = color;	/* cache colors cycle 0..NCOLOR-1 */
			palloc.freecount++;
			color = (color+1)%NCOLOR;
			p++;
		}
	}
	palloc.tail = p - 1;
	palloc.head->prev = 0;
	palloc.tail->next = 0;

	/* user-page and virtual (user+swap) totals, in KB */
	palloc.user = p - palloc.pages;
	pkb = palloc.user*BY2PG/1024;
	vkb = pkb + (conf.nswap*BY2PG)/1024;

	/* Paging numbers */
	swapalloc.highwater = (palloc.user*5)/100;	/* pager kicks in below 5% free */
	swapalloc.headroom = swapalloc.highwater + (swapalloc.highwater/4);

	/* Sum all configured memory for the boot banner. */
	m = 0;
	for(i=0; i<nelem(conf.mem); i++)
		if(conf.mem[i].npage)
			m += conf.mem[i].npage*BY2PG;
	k = 0;	/* NOTE(review): always zero here; kept as a placeholder term in the sums below */
	print("%ldM memory: ", (m+k+1024*1024-1)/(1024*1024));
	print("%ldM kernel data, ", (m+k-pkb*1024+1024*1024-1)/(1024*1024));
	print("%ldM user, ", pkb/1024);
	print("%ldM swap\n", vkb/1024);
}
66
67 static void
68 pageunchain(Page *p)
69 {
70 if(canlock(&palloc.lk))
71 panic("pageunchain (palloc %p)", &palloc);
72 if(p->prev)
73 p->prev->next = p->next;
74 else
75 palloc.head = p->next;
76 if(p->next)
77 p->next->prev = p->prev;
78 else
79 palloc.tail = p->prev;
80 p->prev = p->next = nil;
81 palloc.freecount--;
82 }
83
84 void
85 pagechaintail(Page *p)
86 {
87 if(canlock(&palloc.lk))
88 panic("pagechaintail");
89 if(palloc.tail) {
90 p->prev = palloc.tail;
91 palloc.tail->next = p;
92 }
93 else {
94 palloc.head = p;
95 p->prev = 0;
96 }
97 palloc.tail = p;
98 p->next = 0;
99 palloc.freecount++;
100 }
101
102 void
103 pagechainhead(Page *p)
104 {
105 if(canlock(&palloc.lk))
106 panic("pagechainhead");
107 if(palloc.head) {
108 p->next = palloc.head;
109 palloc.head->prev = p;
110 }
111 else {
112 palloc.tail = p;
113 p->next = 0;
114 }
115 palloc.head = p;
116 p->prev = 0;
117 palloc.freecount++;
118 }
119
/*
 * Allocate a page of physical memory for user virtual address va,
 * preferring a page whose cache color matches va.  If memory is
 * scarce, drop all locks, wake the pager, and sleep until pages are
 * freed.  If s is non-nil and the segment lock had to be released
 * while waiting, returns 0 with *s cleared so fault() can retry.
 * If clear is set the page is zeroed.  Returns the page with ref 1.
 */
Page*
newpage(int clear, Segment **s, ulong va)
{
	Page *p;
	KMap *k;
	uchar ct;
	int hw, dontalloc, color;

	lock(&palloc.lk);
	color = getpgcolor(va);
	hw = swapalloc.highwater;
	for(;;) {
		/* Plenty of pages, or a kernel proc that can take the last ones. */
		if(palloc.freecount > hw)
			break;
		if(up->kp && palloc.freecount > 0)
			break;

		/* Memory is tight: release everything before blocking. */
		unlock(&palloc.lk);
		dontalloc = 0;
		if(s && *s) {
			qunlock(&((*s)->lk));
			*s = 0;
			dontalloc = 1;
		}
		qlock(&palloc.pwait);	/* Hold memory requesters here */

		while(waserror())	/* Ignore interrupts */
			;

		kickpager();
		tsleep(&palloc.r, ispages, 0, 1000);

		poperror();

		qunlock(&palloc.pwait);

		/*
		 * If called from fault and we lost the segment from
		 * underneath don't waste time allocating and freeing
		 * a page. Fault will call newpage again when it has
		 * reacquired the segment locks
		 */
		if(dontalloc)
			return 0;

		lock(&palloc.lk);
	}

	/* First try for our colour */
	for(p = palloc.head; p; p = p->next)
		if(p->color == color)
			break;

	/* No page of the right color: recolor the head page instead. */
	ct = PG_NOFLUSH;
	if(p == 0) {
		p = palloc.head;
		p->color = color;
		ct = PG_NEWCOL;
	}
	(void)ct;	/* cache-flush hint unused in this port */

	pageunchain(p);

	lock(&p->lk);
	if(p->ref != 0)
		panic("newpage: p->ref %d != 0", p->ref);

	/* Detach any stale image mapping before handing the page out. */
	uncachepage(p);
	p->ref++;
	p->va = va;
	p->modref = 0;
	unlock(&p->lk);
	unlock(&palloc.lk);

	if(clear) {
		k = kmap(p);
		memset((void*)VA(k), 0, BY2PG);
		kunmap(k);
	}

	return p;
}
202
203 int
204 ispages(void *v)
205 {
206 return palloc.freecount >= swapalloc.highwater;
207 }
208
209 void
210 putpage(Page *p)
211 {
212 if(onswap(p)) {
213 putswap(p);
214 return;
215 }
216
217 lock(&palloc.lk);
218 lock(&p->lk);
219
220 if(p->ref == 0)
221 panic("putpage");
222
223 if(--p->ref > 0) {
224 unlock(&p->lk);
225 unlock(&palloc.lk);
226 return;
227 }
228
229 if(p->image && p->image != &swapimage)
230 pagechaintail(p);
231 else
232 pagechainhead(p);
233
234 if(palloc.r.p != 0)
235 wakeup(&palloc.r);
236
237 unlock(&p->lk);
238 unlock(&palloc.lk);
239 }
240
241 Page*
242 auxpage(void)
243 {
244 Page *p;
245
246 lock(&palloc.lk);
247 p = palloc.head;
248 if(palloc.freecount < swapalloc.highwater) {
249 unlock(&palloc.lk);
250 return 0;
251 }
252 pageunchain(p);
253
254 lock(&p->lk);
255 if(p->ref != 0)
256 panic("auxpage");
257 p->ref++;
258 uncachepage(p);
259 unlock(&p->lk);
260 unlock(&palloc.lk);
261
262 return p;
263 }
264
/* Retry budget before giving up on the freelist-cache copy; grown on failure. */
static int dupretries = 15000;

/*
 * Try to keep a copy of p in the page cache before p is handed over
 * for writing.  Copies p into a free page of the same color, caches
 * the copy under p's image, and uncaches p itself.  Returns 0 if the
 * cached copy was made (or none was needed), 1 if p was simply
 * uncached because memory was low, no page of the right color was
 * free, or the lock retry budget ran out.
 */
int
duppage(Page *p)				/* Always call with p locked */
{
	Page *np;
	int color;
	int retries;

	retries = 0;
retry:

	/* Too many lock-contention retries: give up and just uncache. */
	if(retries++ > dupretries){
		print("duppage %d, up %p\n", retries, up);
		dupretries += 100;
		if(dupretries > 100000)
			panic("duppage\n");
		uncachepage(p);
		return 1;
	}


	/* don't dup pages with no image */
	if(p->ref == 0 || p->image == nil || p->image->notext)
		return 0;

	/*
	 * normal lock ordering is to call
	 * lock(&palloc.lk) before lock(&p->lk).
	 * To avoid deadlock, we have to drop
	 * our locks and try again.
	 */
	if(!canlock(&palloc.lk)){
		unlock(&p->lk);
		if(up)
			sched();
		lock(&p->lk);
		goto retry;
	}

	/* No freelist cache when memory is very low */
	if(palloc.freecount < swapalloc.highwater) {
		unlock(&palloc.lk);
		uncachepage(p);
		return 1;
	}

	/* Find a free page with p's cache color to hold the copy. */
	color = getpgcolor(p->va);
	for(np = palloc.head; np; np = np->next)
		if(np->color == color)
			break;

	/* No page of the correct color */
	if(np == 0) {
		unlock(&palloc.lk);
		uncachepage(p);
		return 1;
	}

	/* Move np to the tail: it stays "free" but holds cached data. */
	pageunchain(np);
	pagechaintail(np);
	/*
	 * XXX - here's a bug? - np is on the freelist but it's not really free.
	 * when we unlock palloc someone else can come in, decide to
	 * use np, and then try to lock it. they succeed after we've
	 * run copypage and cachepage and unlock(&np->lk). then what?
	 * they call pageunchain before locking(np), so it's removed
	 * from the freelist, but still in the cache because of
	 * cachepage below. if someone else looks in the cache
	 * before they remove it, the page will have a nonzero ref
	 * once they finally lock(&np->lk).
	 */
	lock(&np->lk);
	unlock(&palloc.lk);

	/* Cache the new version */
	uncachepage(np);
	np->va = p->va;
	np->daddr = p->daddr;
	copypage(p, np);
	cachepage(np, p->image);
	unlock(&np->lk);
	uncachepage(p);

	return 0;
}
351
352 void
353 copypage(Page *f, Page *t)
354 {
355 KMap *ks, *kd;
356
357 ks = kmap(f);
358 kd = kmap(t);
359 memmove((void*)VA(kd), (void*)VA(ks), BY2PG);
360 kunmap(ks);
361 kunmap(kd);
362 }
363
364 void
365 uncachepage(Page *p) /* Always called with a locked page */
366 {
367 Page **l, *f;
368
369 if(p->image == 0)
370 return;
371
372 lock(&palloc.hashlock);
373 l = &pghash(p->daddr);
374 for(f = *l; f; f = f->hash) {
375 if(f == p) {
376 *l = p->hash;
377 break;
378 }
379 l = &f->hash;
380 }
381 unlock(&palloc.hashlock);
382 putimage(p->image);
383 p->image = 0;
384 p->daddr = 0;
385 }
386
387 void
388 cachepage(Page *p, Image *i)
389 {
390 Page **l;
391
392 /* If this ever happens it should be fixed by calling
393 * uncachepage instead of panic. I think there is a race
394 * with pio in which this can happen. Calling uncachepage is
395 * correct - I just wanted to see if we got here.
396 */
397 if(p->image)
398 panic("cachepage");
399
400 incref(&i->ref);
401 lock(&palloc.hashlock);
402 p->image = i;
403 l = &pghash(p->daddr);
404 p->hash = *l;
405 *l = p;
406 unlock(&palloc.hashlock);
407 }
408
409 void
410 cachedel(Image *i, ulong daddr)
411 {
412 Page *f, **l;
413
414 lock(&palloc.hashlock);
415 l = &pghash(daddr);
416 for(f = *l; f; f = f->hash) {
417 if(f->image == i && f->daddr == daddr) {
418 lock(&f->lk);
419 if(f->image == i && f->daddr == daddr){
420 *l = f->hash;
421 putimage(f->image);
422 f->image = 0;
423 f->daddr = 0;
424 }
425 unlock(&f->lk);
426 break;
427 }
428 l = &f->hash;
429 }
430 unlock(&palloc.hashlock);
431 }
432
/*
 * Find the page caching (i, daddr).  On success returns the page
 * with an extra reference, removed from the free list if it was
 * there.  Returns 0 on miss or if the page was retargeted while
 * we traded the hash lock for the palloc/page locks.
 */
Page *
lookpage(Image *i, ulong daddr)
{
	Page *f;

	lock(&palloc.hashlock);
	for(f = pghash(daddr); f; f = f->hash) {
		if(f->image == i && f->daddr == daddr) {
			/*
			 * Must drop hashlock before taking palloc.lk/f->lk to
			 * respect lock ordering, so recheck identity below.
			 */
			unlock(&palloc.hashlock);

			lock(&palloc.lk);
			lock(&f->lk);
			if(f->image != i || f->daddr != daddr) {
				/* lost the race: f was reused for something else */
				unlock(&f->lk);
				unlock(&palloc.lk);
				return 0;
			}
			/* first reference means it was sitting on the free list */
			if(++f->ref == 1)
				pageunchain(f);
			unlock(&palloc.lk);
			unlock(&f->lk);

			return f;
		}
	}
	unlock(&palloc.hashlock);

	return 0;
}
462
463 Pte*
464 ptecpy(Pte *old)
465 {
466 Pte *new;
467 Page **src, **dst;
468
469 new = ptealloc();
470 dst = &new->pages[old->first-old->pages];
471 new->first = dst;
472 for(src = old->first; src <= old->last; src++, dst++)
473 if(*src) {
474 if(onswap(*src))
475 dupswap(*src);
476 else {
477 lock(&(*src)->lk);
478 (*src)->ref++;
479 unlock(&(*src)->lk);
480 }
481 new->last = dst;
482 *dst = *src;
483 }
484
485 return new;
486 }
487
488 Pte*
489 ptealloc(void)
490 {
491 Pte *new;
492
493 new = smalloc(sizeof(Pte));
494 new->first = &new->pages[PTEPERTAB];
495 new->last = new->pages;
496 return new;
497 }
498
499 void
500 freepte(Segment *s, Pte *p)
501 {
502 int ref;
503 void (*fn)(Page*);
504 Page *pt, **pg, **ptop;
505
506 switch(s->type&SG_TYPE) {
507 case SG_PHYSICAL:
508 fn = s->pseg->pgfree;
509 ptop = &p->pages[PTEPERTAB];
510 if(fn) {
511 for(pg = p->pages; pg < ptop; pg++) {
512 if(*pg == 0)
513 continue;
514 (*fn)(*pg);
515 *pg = 0;
516 }
517 break;
518 }
519 for(pg = p->pages; pg < ptop; pg++) {
520 pt = *pg;
521 if(pt == 0)
522 continue;
523 lock(&pt->lk);
524 ref = --pt->ref;
525 unlock(&pt->lk);
526 if(ref == 0)
527 free(pt);
528 }
529 break;
530 default:
531 for(pg = p->first; pg <= p->last; pg++)
532 if(*pg) {
533 putpage(*pg);
534 *pg = 0;
535 }
536 }
537 free(p);
538 }
539
540 ulong
541 pagenumber(Page *p)
542 {
543 return p-palloc.pages;
544 }
545
546 void
547 checkpagerefs(void)
548 {
549 int s;
550 ulong i, np, nwrong;
551 ulong *ref;
552
553 np = palloc.user;
554 ref = malloc(np*sizeof ref[0]);
555 if(ref == nil){
556 print("checkpagerefs: out of memory\n");
557 return;
558 }
559
560 /*
561 * This may not be exact if there are other processes
562 * holding refs to pages on their stacks. The hope is
563 * that if you run it on a quiescent system it will still
564 * be useful.
565 */
566 s = splhi();
567 lock(&palloc.lk);
568 countpagerefs(ref, 0);
569 portcountpagerefs(ref, 0);
570 nwrong = 0;
571 for(i=0; i<np; i++){
572 if(palloc.pages[i].ref != ref[i]){
573 iprint("page %#.8lux ref %d actual %lud\n",
574 palloc.pages[i].pa, palloc.pages[i].ref, ref[i]);
575 ref[i] = 1;
576 nwrong++;
577 }else
578 ref[i] = 0;
579 }
580 countpagerefs(ref, 1);
581 portcountpagerefs(ref, 1);
582 iprint("%lud mistakes found\n", nwrong);
583 unlock(&palloc.lk);
584 splx(s);
585 }
586
587 void
588 portcountpagerefs(ulong *ref, int print)
589 {
590 ulong i, j, k, ns, n;
591 Page **pg, *entry;
592 Proc *p;
593 Pte *pte;
594 Segment *s;
595
596 /*
597 * Pages in segments. s->mark avoids double-counting.
598 */
599 n = 0;
600 ns = 0;
601 for(i=0; i<conf.nproc; i++){
602 p = proctab(i);
603 for(j=0; j<NSEG; j++){
604 s = p->seg[j];
605 if(s)
606 s->mark = 0;
607 }
608 }
609 for(i=0; i<conf.nproc; i++){
610 p = proctab(i);
611 for(j=0; j<NSEG; j++){
612 s = p->seg[j];
613 if(s == nil || s->mark++)
614 continue;
615 ns++;
616 for(k=0; k<s->mapsize; k++){
617 pte = s->map[k];
618 if(pte == nil)
619 continue;
620 for(pg = pte->first; pg <= pte->last; pg++){
621 entry = *pg;
622 if(pagedout(entry))
623 continue;
624 if(print){
625 if(ref[pagenumber(entry)])
626 iprint("page %#.8lux in segment %#p\n", entry->pa, s);
627 continue;
628 }
629 if(ref[pagenumber(entry)]++ == 0)
630 n++;
631 }
632 }
633 }
634 }
635 if(!print){
636 iprint("%lud pages in %lud segments\n", n, ns);
637 for(i=0; i<conf.nproc; i++){
638 p = proctab(i);
639 for(j=0; j<NSEG; j++){
640 s = p->seg[j];
641 if(s == nil)
642 continue;
643 if(s->ref.ref != s->mark){
644 iprint("segment %#p (used by proc %lud pid %lud) has bad ref count %lud actual %lud\n",
645 s, i, p->pid, s->ref, s->mark);
646 }
647 }
648 }
649 }
650 }
651