Line data Source code
1 : /* $OpenBSD: uvm_page.c,v 1.147 2018/05/12 17:17:27 krw Exp $ */
2 : /* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
3 :
4 : /*
5 : * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 : * Copyright (c) 1991, 1993, The Regents of the University of California.
7 : *
8 : * All rights reserved.
9 : *
10 : * This code is derived from software contributed to Berkeley by
11 : * The Mach Operating System project at Carnegie-Mellon University.
12 : *
13 : * Redistribution and use in source and binary forms, with or without
14 : * modification, are permitted provided that the following conditions
15 : * are met:
16 : * 1. Redistributions of source code must retain the above copyright
17 : * notice, this list of conditions and the following disclaimer.
18 : * 2. Redistributions in binary form must reproduce the above copyright
19 : * notice, this list of conditions and the following disclaimer in the
20 : * documentation and/or other materials provided with the distribution.
21 : * 3. Neither the name of the University nor the names of its contributors
22 : * may be used to endorse or promote products derived from this software
23 : * without specific prior written permission.
24 : *
25 : * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 : * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 : * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 : * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 : * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 : * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 : * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 : * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 : * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 : * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 : * SUCH DAMAGE.
36 : *
37 : * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
38 : * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
39 : *
40 : *
41 : * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 : * All rights reserved.
43 : *
44 : * Permission to use, copy, modify and distribute this software and
45 : * its documentation is hereby granted, provided that both the copyright
46 : * notice and this permission notice appear in all copies of the
47 : * software, derivative works or modified versions, and any portions
48 : * thereof, and that both notices appear in supporting documentation.
49 : *
50 : * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 : * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 : * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 : *
54 : * Carnegie Mellon requests users of this software to return to
55 : *
56 : * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 : * School of Computer Science
58 : * Carnegie Mellon University
59 : * Pittsburgh PA 15213-3890
60 : *
61 : * any improvements or extensions that they make and grant Carnegie the
62 : * rights to redistribute these changes.
63 : */
64 :
65 : /*
66 : * uvm_page.c: page ops.
67 : */
68 :
69 : #include <sys/param.h>
70 : #include <sys/systm.h>
71 : #include <sys/sched.h>
72 : #include <sys/vnode.h>
73 : #include <sys/mount.h>
74 : #include <sys/proc.h>
75 :
76 : #include <uvm/uvm.h>
77 :
78 : /*
79 : * for object trees
80 : */
81 0 : RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
82 :
83 : int
84 0 : uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
85 : {
86 0 : return (a->offset < b->offset ? -1 : a->offset > b->offset);
87 : }
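/*
 * note the three-way idiom above: (a < b ? -1 : a > b) evaluates to
 * -1, 0 or 1 without the overflow risk of the classic "return a - b"
 * (offsets are voff_t, wider than int). the tree code only needs the
 * sign, so a lookup can use a stack-local key page, the same pattern
 * uvm_pagelookup() uses below; a hedged sketch ('obj' and 'off' are
 * assumed caller state):
 */
#if 0 /* illustrative sketch only */
struct vm_page key;

key.offset = off;	/* offset to search for within obj */
pg = RBT_FIND(uvm_objtree, &obj->memt, &key);
#endif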
88 :
89 : /*
90 : * global vars... XXXCDC: move to uvm. structure.
91 : */
92 : /*
93 : * physical memory config is stored in vm_physmem.
94 : */
95 : struct vm_physseg vm_physmem[VM_PHYSSEG_MAX]; /* XXXCDC: uvm.physmem */
96 : int vm_nphysseg = 0; /* XXXCDC: uvm.nphysseg */
97 :
98 : /*
99 : * Some supported CPUs in a given architecture don't support all
100 : * of the things necessary to do idle page zero'ing efficiently.
101 : * We therefore provide a way to disable it from machdep code here.
102 : */
103 :
104 : /*
105 : * local variables
106 : */
107 : /*
108 : * these variables record the values returned by uvm_page_init,
109 : * for debugging purposes. The implementation of uvm_pageboot_alloc
110 : * here also uses them internally.
111 : */
112 : static vaddr_t virtual_space_start;
113 : static vaddr_t virtual_space_end;
114 :
115 : /*
116 : * local prototypes
117 : */
118 : static void uvm_pageinsert(struct vm_page *);
119 : static void uvm_pageremove(struct vm_page *);
120 :
121 : /*
122 : * inline functions
123 : */
124 : /*
125 : * uvm_pageinsert: insert a page in the object
126 : *
127 : * => caller must lock page queues XXX questionable
128 : * => caller should have already set pg's object and offset pointers
129 : * and bumped the version counter
130 : */
131 : __inline static void
132 0 : uvm_pageinsert(struct vm_page *pg)
133 : {
134 : struct vm_page *dupe;
135 :
136 0 : KASSERT((pg->pg_flags & PG_TABLED) == 0);
137 0 : dupe = RBT_INSERT(uvm_objtree, &pg->uobject->memt, pg);
138 : /* not allowed to insert over another page */
139 0 : KASSERT(dupe == NULL);
140 0 : atomic_setbits_int(&pg->pg_flags, PG_TABLED);
141 0 : pg->uobject->uo_npages++;
142 0 : }
143 :
144 : /*
145 : * uvm_pageremove: remove page from object
146 : *
147 : * => caller must lock page queues
148 : */
149 : static __inline void
150 0 : uvm_pageremove(struct vm_page *pg)
151 : {
152 0 : KASSERT(pg->pg_flags & PG_TABLED);
153 0 : RBT_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
154 :
155 0 : atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
156 0 : pg->uobject->uo_npages--;
157 0 : pg->uobject = NULL;
158 0 : pg->pg_version++;
159 0 : }
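/*
 * a hedged sketch of the caller protocol the two helpers above assume
 * ('pg', 'obj' and 'off' are assumed caller state; uvm_pagealloc_pg()
 * below is a real in-tree caller):
 */
#if 0 /* illustrative sketch only */
pg->uobject = obj;
pg->offset = off;
uvm_pageinsert(pg);	/* links pg into obj's tree, sets PG_TABLED */
/* ... use the page ... */
uvm_pageremove(pg);	/* unlinks pg, clears PG_TABLED and pg->uobject */
#endif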
160 :
161 : /*
162 : * uvm_page_init: init the page system. called from uvm_init().
163 : *
164 : * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
165 : */
166 : void
167 0 : uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
168 : {
169 : vsize_t freepages, pagecount, n;
170 : vm_page_t pagearray, curpg;
171 : int lcv, i;
172 : paddr_t paddr, pgno;
173 : struct vm_physseg *seg;
174 :
175 : /*
176 : * init the page queues and page queue locks
177 : */
178 :
179 0 : TAILQ_INIT(&uvm.page_active);
180 0 : TAILQ_INIT(&uvm.page_inactive_swp);
181 0 : TAILQ_INIT(&uvm.page_inactive_obj);
182 0 : mtx_init(&uvm.pageqlock, IPL_NONE);
183 0 : mtx_init(&uvm.fpageqlock, IPL_VM);
184 0 : uvm_pmr_init();
185 :
186 : /*
187 : * allocate vm_page structures.
188 : */
189 :
190 : /*
191 : * sanity check:
192 : * before calling this function the MD code is expected to register
193 : * some free RAM with the uvm_page_physload() function. our job
194 : * now is to allocate vm_page structures for this memory.
195 : */
196 :
197 0 : if (vm_nphysseg == 0)
198 0 : panic("uvm_page_init: no memory pre-allocated");
199 :
200 : /*
201 : * first calculate the number of free pages...
202 : *
203 : * note that we use start/end rather than avail_start/avail_end.
204 : * this allows us to allocate extra vm_page structures in case we
205 : * want to return some memory to the pool after booting.
206 : */
207 :
208 : freepages = 0;
209 0 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
210 0 : freepages += (seg->end - seg->start);
211 :
212 : /*
213 : * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
214 : * use. for each page of memory we use we need a vm_page structure.
215 : * thus, the total number of pages we can use is the total size of
216 : * the memory divided by (PAGE_SIZE plus the size of the vm_page
217 : * structure). we add one to freepages as a fudge factor to avoid
218 : * truncation errors (since we can only allocate in terms of whole
219 : * pages).
220 : */
221 :
222 0 : pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
223 : (PAGE_SIZE + sizeof(struct vm_page));
224 0 : pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
225 : sizeof(struct vm_page));
226 0 : memset(pagearray, 0, pagecount * sizeof(struct vm_page));
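/*
 * worked example of the formula above (sizes are illustrative): with
 * freepages = 1048576 (4GB of 4KB pages) and a hypothetical
 * sizeof(struct vm_page) of 128 bytes,
 *
 *	pagecount = ((1048576 + 1) << 12) / (4096 + 128)
 *		  = 4294971392 / 4224 = 1016801	(integer division)
 *
 * so roughly 3% of the raw pages are consumed by the vm_page array.
 */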
227 :
228 : /* init the vm_page structures and put them in the correct place. */
229 0 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
230 0 : n = seg->end - seg->start;
231 0 : if (n > pagecount) {
232 0 : panic("uvm_page_init: lost %ld page(s) in init",
233 0 : (long)(n - pagecount));
234 : /* XXXCDC: shouldn't happen? */
235 : /* n = pagecount; */
236 : }
237 :
238 : /* set up page array pointers */
239 0 : seg->pgs = pagearray;
240 0 : pagearray += n;
241 0 : pagecount -= n;
242 0 : seg->lastpg = seg->pgs + (n - 1);
243 :
244 : /* init and free vm_pages (we've already zeroed them) */
245 0 : pgno = seg->start;
246 0 : paddr = ptoa(pgno);
247 0 : for (i = 0, curpg = seg->pgs; i < n;
248 0 : i++, curpg++, pgno++, paddr += PAGE_SIZE) {
249 0 : curpg->phys_addr = paddr;
250 0 : VM_MDPAGE_INIT(curpg);
251 0 : if (pgno >= seg->avail_start &&
252 0 : pgno < seg->avail_end) {
253 0 : uvmexp.npages++;
254 0 : }
255 : }
256 :
257 : /* Add pages to free pool. */
258 0 : uvm_pmr_freepages(&seg->pgs[seg->avail_start - seg->start],
259 0 : seg->avail_end - seg->avail_start);
260 : }
261 :
262 : /*
263 : * pass up the values of virtual_space_start and
264 : * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
265 : * layers of the VM.
266 : */
267 :
268 0 : *kvm_startp = round_page(virtual_space_start);
269 0 : *kvm_endp = trunc_page(virtual_space_end);
270 :
271 : /* init locks for kernel threads */
272 0 : mtx_init(&uvm.aiodoned_lock, IPL_BIO);
273 :
274 : /*
275 : * init reserve thresholds
276 : * XXXCDC - values may need adjusting
277 : */
278 0 : uvmexp.reserve_pagedaemon = 4;
279 0 : uvmexp.reserve_kernel = 6;
280 0 : uvmexp.anonminpct = 10;
281 0 : uvmexp.vnodeminpct = 10;
282 0 : uvmexp.vtextminpct = 5;
283 0 : uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
284 0 : uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
285 0 : uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
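/*
 * the "* 256 / 100" idiom above converts a percentage into a fraction
 * over 256 using integer math only, e.g. for the 10% minimums:
 * 10 * 256 / 100 = 25, and 25/256 ~= 9.77%, which is the threshold
 * actually enforced.
 */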
286 :
287 0 : uvm.page_init_done = TRUE;
288 0 : }
289 :
290 : /*
291 : * uvm_setpagesize: set the page size
292 : *
293 : * => sets page_shift and page_mask from uvmexp.pagesize.
294 : */
295 : void
296 0 : uvm_setpagesize(void)
297 : {
298 0 : if (uvmexp.pagesize == 0)
299 0 : uvmexp.pagesize = DEFAULT_PAGE_SIZE;
300 0 : uvmexp.pagemask = uvmexp.pagesize - 1;
301 0 : if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
302 0 : panic("uvm_setpagesize: page size not a power of two");
303 0 : for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
304 0 : if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
305 : break;
306 0 : }
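/*
 * the panic check above uses the standard bit trick: when pagesize is
 * a power of two, pagemask = pagesize - 1 has no bit in common with
 * pagesize. a stand-alone restatement of the same test:
 */
#if 0 /* illustrative sketch only */
static int
is_power_of_two(unsigned long v)
{
	return (v != 0 && (v & (v - 1)) == 0);
}
#endif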
307 :
308 : /*
309 : * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
310 : */
311 : vaddr_t
312 0 : uvm_pageboot_alloc(vsize_t size)
313 : {
314 : #if defined(PMAP_STEAL_MEMORY)
315 : vaddr_t addr;
316 :
317 : /*
318 : * defer bootstrap allocation to MD code (it may want to allocate
319 : * from a direct-mapped segment). pmap_steal_memory should round
320 : * off virtual_space_start/virtual_space_end.
321 : */
322 :
323 0 : addr = pmap_steal_memory(size, &virtual_space_start,
324 : &virtual_space_end);
325 :
326 0 : return(addr);
327 :
328 : #else /* !PMAP_STEAL_MEMORY */
329 :
330 : static boolean_t initialized = FALSE;
331 : vaddr_t addr, vaddr;
332 : paddr_t paddr;
333 :
334 : /* round to page size */
335 : size = round_page(size);
336 :
337 : /* on first call to this function, initialize ourselves. */
338 : if (initialized == FALSE) {
339 : pmap_virtual_space(&virtual_space_start, &virtual_space_end);
340 :
341 : /* round it the way we like it */
342 : virtual_space_start = round_page(virtual_space_start);
343 : virtual_space_end = trunc_page(virtual_space_end);
344 :
345 : initialized = TRUE;
346 : }
347 :
348 : /* allocate virtual memory for this request */
349 : if (virtual_space_start == virtual_space_end ||
350 : (virtual_space_end - virtual_space_start) < size)
351 : panic("uvm_pageboot_alloc: out of virtual space");
352 :
353 : addr = virtual_space_start;
354 :
355 : #ifdef PMAP_GROWKERNEL
356 : /*
357 : * If the kernel pmap can't map the requested space,
358 : * then allocate more resources for it.
359 : */
360 : if (uvm_maxkaddr < (addr + size)) {
361 : uvm_maxkaddr = pmap_growkernel(addr + size);
362 : if (uvm_maxkaddr < (addr + size))
363 : panic("uvm_pageboot_alloc: pmap_growkernel() failed");
364 : }
365 : #endif
366 :
367 : virtual_space_start += size;
368 :
369 : /* allocate and mapin physical pages to back new virtual pages */
370 : for (vaddr = round_page(addr) ; vaddr < addr + size ;
371 : vaddr += PAGE_SIZE) {
372 : if (!uvm_page_physget(&paddr))
373 : panic("uvm_pageboot_alloc: out of memory");
374 :
375 : /*
376 : * Note this memory is no longer managed, so using
377 : * pmap_kenter is safe.
378 : */
379 : pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
380 : }
381 : pmap_update(pmap_kernel());
382 : return(addr);
383 : #endif /* PMAP_STEAL_MEMORY */
384 : }
385 :
386 : #if !defined(PMAP_STEAL_MEMORY)
387 : /*
388 : * uvm_page_physget: "steal" one page from the vm_physmem structure.
389 : *
390 : * => attempt to allocate it off the end of a segment in which the "avail"
391 : * values match the start/end values. if we can't do that, then we
392 : * will advance both values (making them equal, and removing some
393 : * vm_page structures from the non-avail area).
394 : * => return false if out of memory.
395 : */
396 :
397 : boolean_t
398 : uvm_page_physget(paddr_t *paddrp)
399 : {
400 : int lcv;
401 : struct vm_physseg *seg;
402 :
403 : /* pass 1: try allocating from a matching end */
404 : #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
405 : (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
406 : for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
407 : lcv--, seg--)
408 : #else
409 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
410 : #endif
411 : {
412 : if (uvm.page_init_done == TRUE)
413 : panic("uvm_page_physget: called _after_ bootstrap");
414 :
415 : /* try from front */
416 : if (seg->avail_start == seg->start &&
417 : seg->avail_start < seg->avail_end) {
418 : *paddrp = ptoa(seg->avail_start);
419 : seg->avail_start++;
420 : seg->start++;
421 : /* nothing left? nuke it */
422 : if (seg->avail_start == seg->end) {
423 : if (vm_nphysseg == 1)
424 : panic("uvm_page_physget: out of memory!");
425 : vm_nphysseg--;
426 : for (; lcv < vm_nphysseg; lcv++, seg++)
427 : /* structure copy */
428 : seg[0] = seg[1];
429 : }
430 : return (TRUE);
431 : }
432 :
433 : /* try from rear */
434 : if (seg->avail_end == seg->end &&
435 : seg->avail_start < seg->avail_end) {
436 : *paddrp = ptoa(seg->avail_end - 1);
437 : seg->avail_end--;
438 : seg->end--;
439 : /* nothing left? nuke it */
440 : if (seg->avail_end == seg->start) {
441 : if (vm_nphysseg == 1)
442 : panic("uvm_page_physget: out of memory!");
443 : vm_nphysseg--;
444 : for (; lcv < vm_nphysseg ; lcv++, seg++)
445 : /* structure copy */
446 : seg[0] = seg[1];
447 : }
448 : return (TRUE);
449 : }
450 : }
451 :
452 : /* pass 2: forget about matching ends, just allocate something */
453 : #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
454 : (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
455 : for (lcv = vm_nphysseg - 1, seg = vm_physmem + lcv; lcv >= 0;
456 : lcv--, seg--)
457 : #else
458 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
459 : #endif
460 : {
461 :
462 : /* any room in this bank? */
463 : if (seg->avail_start >= seg->avail_end)
464 : continue; /* nope */
465 :
466 : *paddrp = ptoa(seg->avail_start);
467 : seg->avail_start++;
468 : /* truncate! */
469 : seg->start = seg->avail_start;
470 :
471 : /* nothing left? nuke it */
472 : if (seg->avail_start == seg->end) {
473 : if (vm_nphysseg == 1)
474 : panic("uvm_page_physget: out of memory!");
475 : vm_nphysseg--;
476 : for (; lcv < vm_nphysseg ; lcv++, seg++)
477 : /* structure copy */
478 : seg[0] = seg[1];
479 : }
480 : return (TRUE);
481 : }
482 :
483 : return (FALSE); /* whoops! */
484 : }
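/*
 * the "structure copy" loops above delete a segment by shifting every
 * later vm_physmem[] entry down one slot; in generic sketch form,
 * removing entry i from an array of n structs:
 */
#if 0 /* illustrative sketch only */
for (; i < n - 1; i++)
	array[i] = array[i + 1];	/* whole-struct assignment */
n--;
#endif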
485 :
486 : #endif /* PMAP_STEAL_MEMORY */
487 :
488 : /*
489 : * uvm_page_physload: load physical memory into VM system
490 : *
491 : * => all args are PFs (page frame numbers)
492 : * => all pages in start/end get vm_page structures
493 : * => areas marked by avail_start/avail_end get added to the free page pool
494 : * => we are limited to VM_PHYSSEG_MAX physical memory segments
495 : */
496 :
497 : void
498 0 : uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
499 : paddr_t avail_end, int flags)
500 : {
501 : int preload, lcv;
502 : psize_t npages;
503 : struct vm_page *pgs;
504 : struct vm_physseg *ps, *seg;
505 :
506 : #ifdef DIAGNOSTIC
507 0 : if (uvmexp.pagesize == 0)
508 0 : panic("uvm_page_physload: page size not set!");
509 :
510 0 : if (start >= end)
511 0 : panic("uvm_page_physload: start >= end");
512 : #endif
513 :
514 : /* do we have room? */
515 0 : if (vm_nphysseg == VM_PHYSSEG_MAX) {
516 0 : printf("uvm_page_physload: unable to load physical memory "
517 : "segment\n");
518 0 : printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
519 : VM_PHYSSEG_MAX, (long long)start, (long long)end);
520 0 : printf("\tincrease VM_PHYSSEG_MAX\n");
521 0 : return;
522 : }
523 :
524 : /*
525 : * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
526 : * called yet, so malloc is not available).
527 : */
528 0 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++) {
529 0 : if (seg->pgs)
530 : break;
531 : }
532 0 : preload = (lcv == vm_nphysseg);
533 :
534 : /* if VM is already running, attempt to allocate vm_page structures */
535 0 : if (!preload) {
536 : /*
537 : * XXXCDC: need some sort of lockout for this case
538 : * right now it is only used by devices so it should be alright.
539 : */
540 : paddr_t paddr;
541 :
542 0 : npages = end - start; /* # of pages */
543 :
544 0 : pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
545 : npages * sizeof(*pgs));
546 0 : if (pgs == NULL) {
547 0 : printf("uvm_page_physload: can not malloc vm_page "
548 : "structs for segment\n");
549 0 : printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
550 0 : return;
551 : }
552 : /* init phys_addr and free pages, XXX uvmexp.npages */
553 0 : for (lcv = 0, paddr = ptoa(start); lcv < npages;
554 0 : lcv++, paddr += PAGE_SIZE) {
555 0 : pgs[lcv].phys_addr = paddr;
556 0 : VM_MDPAGE_INIT(&pgs[lcv]);
557 0 : if (atop(paddr) >= avail_start &&
558 0 : atop(paddr) < avail_end) {
559 0 : if (flags & PHYSLOAD_DEVICE) {
560 0 : atomic_setbits_int(&pgs[lcv].pg_flags,
561 : PG_DEV);
562 0 : pgs[lcv].wire_count = 1;
563 : } else {
564 : #if defined(VM_PHYSSEG_NOADD)
565 0 : panic("uvm_page_physload: tried to add RAM after vm_mem_init");
566 : #endif
567 : }
568 0 : }
569 : }
570 :
571 : /* Add pages to free pool. */
572 0 : if ((flags & PHYSLOAD_DEVICE) == 0) {
573 0 : uvm_pmr_freepages(&pgs[avail_start - start],
574 0 : avail_end - avail_start);
575 0 : }
576 :
577 : /* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
578 0 : } else {
579 : /* gcc complains if these don't get init'd */
580 : pgs = NULL;
581 : npages = 0;
582 :
583 : }
584 :
585 : /* now insert us in the proper place in vm_physmem[] */
586 : #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
587 : /* random: put it at the end (easy!) */
588 : ps = &vm_physmem[vm_nphysseg];
589 : #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
590 : {
591 : int x;
592 : /* sort by address for binary search */
593 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
594 : if (start < seg->start)
595 : break;
596 : ps = seg;
597 : /* move back other entries, if necessary ... */
598 : for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
599 : x--, seg--)
600 : /* structure copy */
601 : seg[1] = seg[0];
602 : }
603 : #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
604 : {
605 : int x;
606 : /* sort by largest segment first */
607 0 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg; lcv++, seg++)
608 0 : if ((end - start) >
609 0 : (seg->end - seg->start))
610 : break;
611 0 : ps = &vm_physmem[lcv];
612 : /* move back other entries, if necessary ... */
613 0 : for (x = vm_nphysseg, seg = vm_physmem + x - 1; x > lcv;
614 0 : x--, seg--)
615 : /* structure copy */
616 0 : seg[1] = seg[0];
617 : }
618 : #else
619 : panic("uvm_page_physload: unknown physseg strategy selected!");
620 : #endif
621 :
622 0 : ps->start = start;
623 0 : ps->end = end;
624 0 : ps->avail_start = avail_start;
625 0 : ps->avail_end = avail_end;
626 0 : if (preload) {
627 0 : ps->pgs = NULL;
628 0 : } else {
629 0 : ps->pgs = pgs;
630 0 : ps->lastpg = pgs + npages - 1;
631 : }
632 0 : vm_nphysseg++;
633 :
634 0 : return;
635 0 : }
636 :
637 : #ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
638 :
639 : void uvm_page_physdump(void); /* SHUT UP GCC */
640 :
641 : /* call from DDB */
642 : void
643 0 : uvm_page_physdump(void)
644 : {
645 : int lcv;
646 : struct vm_physseg *seg;
647 :
648 0 : printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
649 0 : vm_nphysseg, VM_PHYSSEG_MAX);
650 0 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++)
651 0 : printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
652 0 : (long long)seg->start,
653 0 : (long long)seg->end,
654 0 : (long long)seg->avail_start,
655 0 : (long long)seg->avail_end);
656 0 : printf("STRATEGY = ");
657 : switch (VM_PHYSSEG_STRAT) {
658 : case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
659 : case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
660 0 : case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
661 : default: printf("<<UNKNOWN>>!!!!\n");
662 : }
663 0 : }
664 : #endif
665 :
666 : void
667 0 : uvm_shutdown(void)
668 : {
669 : #ifdef UVM_SWAP_ENCRYPT
670 0 : uvm_swap_finicrypt_all();
671 : #endif
672 0 : }
673 :
674 : /*
675 : * Insert the given page into the specified anon or obj.
676 : * This is basically uvm_pagealloc, but with the page already given.
677 : */
678 : void
679 0 : uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
680 : struct vm_anon *anon)
681 : {
682 : int flags;
683 :
684 : flags = PG_BUSY | PG_FAKE;
685 0 : pg->offset = off;
686 0 : pg->uobject = obj;
687 0 : pg->uanon = anon;
688 :
689 0 : if (anon) {
690 0 : anon->an_page = pg;
691 : flags |= PQ_ANON;
692 0 : } else if (obj)
693 0 : uvm_pageinsert(pg);
694 0 : atomic_setbits_int(&pg->pg_flags, flags);
695 : #if defined(UVM_PAGE_TRKOWN)
696 : pg->owner_tag = NULL;
697 : #endif
698 : UVM_PAGE_OWN(pg, "new alloc");
699 0 : }
700 :
701 : /*
702 : * uvm_pglistalloc: allocate a list of pages
703 : *
704 : * => allocated pages are placed at the tail of rlist. rlist is
705 : * assumed to be properly initialized by caller.
706 : * => returns 0 on success or errno on failure
707 : * => doesn't take into account clean non-busy pages on inactive list
708 : * that could be used(?)
709 : * => params:
710 : * size the size of the allocation, rounded to page size.
711 : * low the low address of the allowed allocation range.
712 : * high the high address of the allowed allocation range.
713 : * alignment memory must be aligned to this power-of-two boundary.
714 : * boundary no segment in the allocation may cross this
715 : * power-of-two boundary (relative to zero).
716 : * => flags:
717 : * UVM_PLA_NOWAIT fail if allocation fails
718 : * UVM_PLA_WAITOK wait for memory to become avail
719 : * UVM_PLA_ZERO return zeroed memory
720 : */
721 : int
722 0 : uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
723 : paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
724 : {
725 0 : KASSERT((alignment & (alignment - 1)) == 0);
726 0 : KASSERT((boundary & (boundary - 1)) == 0);
727 0 : KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
728 :
729 0 : if (size == 0)
730 0 : return (EINVAL);
731 0 : size = atop(round_page(size));
732 :
733 : /*
734 : * check to see if we need to generate some free pages by waking
735 : * the pagedaemon.
736 : */
737 0 : if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
738 0 : ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
739 0 : (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
740 0 : wakeup(&uvm.pagedaemon);
741 :
742 : /*
743 : * XXX uvm_pglistalloc is currently only used for kernel
744 : * objects. Unlike the checks in uvm_pagealloc, below, here
745 : * we are always allowed to use the kernel reserve. However, we
746 : * have to enforce the pagedaemon reserve here or allocations
747 : * via this path could consume everything and we can't
748 : * recover in the page daemon.
749 : */
750 : again:
751 0 : if ((uvmexp.free <= uvmexp.reserve_pagedaemon + size &&
752 0 : !((curproc == uvm.pagedaemon_proc) ||
753 0 : (curproc == syncerproc)))) {
754 0 : if (flags & UVM_PLA_WAITOK) {
755 0 : uvm_wait("uvm_pglistalloc");
756 0 : goto again;
757 : }
758 0 : return (ENOMEM);
759 : }
760 :
761 0 : if ((high & PAGE_MASK) != PAGE_MASK) {
762 0 : printf("uvm_pglistalloc: Upper boundary 0x%lx "
763 : "not on pagemask.\n", (unsigned long)high);
764 0 : }
765 :
766 : /*
767 : * Our allocations are always page granularity, so our alignment
768 : * must be, too.
769 : */
770 0 : if (alignment < PAGE_SIZE)
771 0 : alignment = PAGE_SIZE;
772 :
773 0 : low = atop(roundup(low, alignment));
774 : /*
775 : * high + 1 may result in overflow, in which case high becomes 0x0,
776 : * which is the 'don't care' value.
777 : * The only requirement in that case is that low is also 0x0, or the
778 : * low<high assert will fail.
779 : */
780 0 : high = atop(high + 1);
781 0 : alignment = atop(alignment);
782 0 : if (boundary < PAGE_SIZE && boundary != 0)
783 0 : boundary = PAGE_SIZE;
784 0 : boundary = atop(boundary);
785 :
786 0 : return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
787 : flags, rlist);
788 0 : }
789 :
790 : /*
791 : * uvm_pglistfree: free a list of pages
792 : *
793 : * => pages should already be unmapped
794 : */
795 : void
796 0 : uvm_pglistfree(struct pglist *list)
797 : {
798 0 : uvm_pmr_freepageq(list);
799 0 : }
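/*
 * a hedged usage sketch for the pair above ('size', 'low' and 'high'
 * are assumed caller values; uvm_pagealloc_multi() below is a real
 * in-tree caller of uvm_pglistalloc()):
 */
#if 0 /* illustrative sketch only */
struct pglist pl;
struct vm_page *pg;

TAILQ_INIT(&pl);
if (uvm_pglistalloc(size, low, high, 0, 0, &pl, 1, UVM_PLA_WAITOK) == 0) {
	TAILQ_FOREACH(pg, &pl, pageq)
		/* e.g. hand VM_PAGE_TO_PHYS(pg) to a device */;
	uvm_pglistfree(&pl);
}
#endif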
800 :
801 : /*
802 : * interface used by the buffer cache to allocate a buffer at a time.
803 : * The pages are allocated wired in DMA accessible memory
804 : */
805 : int
806 0 : uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
807 : int flags)
808 : {
809 0 : struct pglist plist;
810 : struct vm_page *pg;
811 : int i, r;
812 :
813 :
814 0 : TAILQ_INIT(&plist);
815 0 : r = uvm_pglistalloc(size, dma_constraint.ucr_low,
816 0 : dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
817 : flags);
818 0 : if (r == 0) {
819 : i = 0;
820 0 : while ((pg = TAILQ_FIRST(&plist)) != NULL) {
821 0 : pg->wire_count = 1;
822 0 : atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
823 0 : KASSERT((pg->pg_flags & PG_DEV) == 0);
824 0 : TAILQ_REMOVE(&plist, pg, pageq);
825 0 : uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
826 : }
827 : }
828 0 : return r;
829 0 : }
830 :
831 : /*
832 : * interface used by the buffer cache to reallocate a buffer at a time.
833 : * The pages are reallocated wired, within the given constraint range.
834 : *
835 : */
836 : int
837 0 : uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
838 : int flags, struct uvm_constraint_range *where)
839 : {
840 0 : struct pglist plist;
841 : struct vm_page *pg, *tpg;
842 : int i, r;
843 : voff_t offset;
844 :
845 :
846 0 : TAILQ_INIT(&plist);
847 0 : if (size == 0)
848 0 : panic("size 0 uvm_pagerealloc");
849 0 : r = uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
850 0 : 0, &plist, atop(round_page(size)), flags);
851 0 : if (r == 0) {
852 : i = 0;
853 0 : while((pg = TAILQ_FIRST(&plist)) != NULL) {
854 0 : offset = off + ptoa(i++);
855 0 : tpg = uvm_pagelookup(obj, offset);
856 0 : KASSERT(tpg != NULL);
857 0 : pg->wire_count = 1;
858 0 : atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
859 0 : KASSERT((pg->pg_flags & PG_DEV) == 0);
860 0 : TAILQ_REMOVE(&plist, pg, pageq);
861 0 : uvm_pagecopy(tpg, pg);
862 0 : KASSERT(tpg->wire_count == 1);
863 0 : tpg->wire_count = 0;
864 0 : uvm_pagefree(tpg);
865 0 : uvm_pagealloc_pg(pg, obj, offset, NULL);
866 : }
867 : }
868 0 : return r;
869 0 : }
870 :
871 : /*
872 : * uvm_pagealloc: allocate a vm_page from the free pool.
873 : *
874 : * => return null if no pages free
875 : * => wake up pagedaemon if number of free pages drops below low water mark
876 : * => only one of obj or anon can be non-null
877 : * => caller must activate/deactivate page if it is not wired.
878 : */
879 :
880 : struct vm_page *
881 0 : uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
882 : int flags)
883 : {
884 : struct vm_page *pg;
885 0 : struct pglist pgl;
886 : int pmr_flags;
887 : boolean_t use_reserve;
888 :
889 0 : KASSERT(obj == NULL || anon == NULL);
890 0 : KASSERT(off == trunc_page(off));
891 :
892 : /*
893 : * check to see if we need to generate some free pages by waking
894 : * the pagedaemon.
895 : */
896 0 : if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
897 0 : ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
898 0 : (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
899 0 : wakeup(&uvm.pagedaemon);
900 :
901 : /*
902 : * fail if any of these conditions is true:
903 : * [1] there really are no free pages, or
904 : * [2] only kernel "reserved" pages remain and
905 : * the page isn't being allocated to a kernel object.
906 : * [3] only pagedaemon "reserved" pages remain and
907 : * the requestor isn't the pagedaemon.
908 : */
909 0 : use_reserve = (flags & UVM_PGA_USERESERVE) ||
910 0 : (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
911 0 : if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
912 0 : (uvmexp.free <= uvmexp.reserve_pagedaemon &&
913 0 : !((curproc == uvm.pagedaemon_proc) ||
914 0 : (curproc == syncerproc))))
915 : goto fail;
916 :
917 : pmr_flags = UVM_PLA_NOWAIT;
918 0 : if (flags & UVM_PGA_ZERO)
919 0 : pmr_flags |= UVM_PLA_ZERO;
920 0 : TAILQ_INIT(&pgl);
921 0 : if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
922 : goto fail;
923 :
924 0 : pg = TAILQ_FIRST(&pgl);
925 0 : KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
926 :
927 0 : uvm_pagealloc_pg(pg, obj, off, anon);
928 0 : KASSERT((pg->pg_flags & PG_DEV) == 0);
929 0 : if (flags & UVM_PGA_ZERO)
930 0 : atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
931 : else
932 0 : atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
933 :
934 0 : return(pg);
935 :
936 : fail:
937 0 : return (NULL);
938 0 : }
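/*
 * a minimal usage sketch ('uobj' and 'off' are assumed caller state;
 * the caller must still activate/deactivate the page as noted above):
 */
#if 0 /* illustrative sketch only */
struct vm_page *pg;

pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO);
if (pg == NULL) {
	uvm_wait("pgalloc");	/* block until the pagedaemon frees memory */
	/* ... then retry ... */
}
#endif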
939 :
940 : /*
941 : * uvm_pagerealloc: reallocate a page from one object to another
942 : */
943 :
944 : void
945 0 : uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
946 : {
947 :
948 : /* remove it from the old object */
949 0 : if (pg->uobject) {
950 0 : uvm_pageremove(pg);
951 0 : }
952 :
953 : /* put it in the new object */
954 0 : if (newobj) {
955 0 : pg->uobject = newobj;
956 0 : pg->offset = newoff;
957 0 : pg->pg_version++;
958 0 : uvm_pageinsert(pg);
959 0 : }
960 0 : }
961 :
962 :
963 : /*
964 : * uvm_pagefree: free page
965 : *
966 : * => erase page's identity (i.e. remove from object)
967 : * => put page on free list
968 : * => caller must lock page queues
969 : * => assumes all valid mappings of pg are gone
970 : */
971 : void
972 0 : uvm_pagefree(struct vm_page *pg)
973 : {
974 : u_int flags_to_clear = 0;
975 :
976 : #ifdef DEBUG
977 : if (pg->uobject == (void *)0xdeadbeef &&
978 : pg->uanon == (void *)0xdeadbeef) {
979 : panic("uvm_pagefree: freeing free page %p", pg);
980 : }
981 : #endif
982 :
983 0 : KASSERT((pg->pg_flags & PG_DEV) == 0);
984 :
985 : /*
986 : * if the page was an object page (and thus "TABLED"), remove it
987 : * from the object.
988 : */
989 0 : if (pg->pg_flags & PG_TABLED)
990 0 : uvm_pageremove(pg);
991 :
992 : /* now remove the page from the queues */
993 0 : if (pg->pg_flags & PQ_ACTIVE) {
994 0 : TAILQ_REMOVE(&uvm.page_active, pg, pageq);
995 : flags_to_clear |= PQ_ACTIVE;
996 0 : uvmexp.active--;
997 0 : }
998 0 : if (pg->pg_flags & PQ_INACTIVE) {
999 0 : if (pg->pg_flags & PQ_SWAPBACKED)
1000 0 : TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1001 : else
1002 0 : TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1003 0 : flags_to_clear |= PQ_INACTIVE;
1004 0 : uvmexp.inactive--;
1005 0 : }
1006 :
1007 : /* if the page was wired, unwire it now. */
1008 0 : if (pg->wire_count) {
1009 0 : pg->wire_count = 0;
1010 0 : uvmexp.wired--;
1011 0 : }
1012 0 : if (pg->uanon) {
1013 0 : pg->uanon->an_page = NULL;
1014 0 : pg->uanon = NULL;
1015 0 : }
1016 :
1017 : /* Clean page state bits. */
1018 0 : flags_to_clear |= PQ_ANON|PQ_AOBJ|PQ_ENCRYPT|PG_ZERO|PG_FAKE|PG_BUSY|
1019 : PG_RELEASED|PG_CLEAN|PG_CLEANCHK;
1020 0 : atomic_clearbits_int(&pg->pg_flags, flags_to_clear);
1021 :
1022 : /* and put on free queue */
1023 : #ifdef DEBUG
1024 : pg->uobject = (void *)0xdeadbeef;
1025 : pg->offset = 0xdeadbeef;
1026 : pg->uanon = (void *)0xdeadbeef;
1027 : #endif
1028 :
1029 0 : uvm_pmr_freepages(pg, 1);
1030 0 : }
1031 :
1032 : /*
1033 : * uvm_page_unbusy: unbusy an array of pages.
1034 : *
1035 : * => pages must either all belong to the same object, or all belong to anons.
1036 : * => if pages are anon-owned, anons must have 0 refcount.
1037 : */
1038 : void
1039 0 : uvm_page_unbusy(struct vm_page **pgs, int npgs)
1040 : {
1041 : struct vm_page *pg;
1042 : struct uvm_object *uobj;
1043 : int i;
1044 :
1045 0 : for (i = 0; i < npgs; i++) {
1046 0 : pg = pgs[i];
1047 :
1048 0 : if (pg == NULL || pg == PGO_DONTCARE) {
1049 : continue;
1050 : }
1051 0 : if (pg->pg_flags & PG_WANTED) {
1052 0 : wakeup(pg);
1053 0 : }
1054 0 : if (pg->pg_flags & PG_RELEASED) {
1055 0 : uobj = pg->uobject;
1056 0 : if (uobj != NULL) {
1057 0 : uvm_lock_pageq();
1058 0 : pmap_page_protect(pg, PROT_NONE);
1059 : /* XXX won't happen right now */
1060 0 : if (pg->pg_flags & PQ_AOBJ)
1061 0 : uao_dropswap(uobj,
1062 0 : pg->offset >> PAGE_SHIFT);
1063 0 : uvm_pagefree(pg);
1064 0 : uvm_unlock_pageq();
1065 0 : } else {
1066 0 : atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
1067 : UVM_PAGE_OWN(pg, NULL);
1068 0 : uvm_anfree(pg->uanon);
1069 : }
1070 : } else {
1071 0 : atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
1072 : UVM_PAGE_OWN(pg, NULL);
1073 : }
1074 : }
1075 0 : }
1076 :
1077 : #if defined(UVM_PAGE_TRKOWN)
1078 : /*
1079 : * uvm_page_own: set or release page ownership
1080 : *
1081 : * => this is a debugging function that keeps track of who sets PG_BUSY
1082 : * and where they do it. it can be used to track down problems
1083 : * such as a thread setting "PG_BUSY" and never releasing it.
1084 : * => if "tag" is NULL then we are releasing page ownership
1085 : */
1086 : void
1087 : uvm_page_own(struct vm_page *pg, char *tag)
1088 : {
1089 : /* gain ownership? */
1090 : if (tag) {
1091 : if (pg->owner_tag) {
1092 : printf("uvm_page_own: page %p already owned "
1093 : "by thread %d [%s]\n", pg,
1094 : pg->owner, pg->owner_tag);
1095 : panic("uvm_page_own");
1096 : }
1097 : pg->owner = (curproc) ? curproc->p_tid : (pid_t) -1;
1098 : pg->owner_tag = tag;
1099 : return;
1100 : }
1101 :
1102 : /* drop ownership */
1103 : if (pg->owner_tag == NULL) {
1104 : printf("uvm_page_own: dropping ownership of an non-owned "
1105 : "page (%p)\n", pg);
1106 : panic("uvm_page_own");
1107 : }
1108 : pg->owner_tag = NULL;
1109 : return;
1110 : }
1111 : #endif
1112 :
1113 : /*
1114 : * when VM_PHYSSEG_MAX is 1, we can simplify these functions
1115 : */
1116 :
1117 : #if VM_PHYSSEG_MAX > 1
1118 : /*
1119 : * vm_physseg_find: find vm_physseg structure that belongs to a PA
1120 : */
1121 : int
1122 0 : vm_physseg_find(paddr_t pframe, int *offp)
1123 : {
1124 : struct vm_physseg *seg;
1125 :
1126 : #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
1127 : /* binary search for it */
1128 : int start, len, try;
1129 :
1130 : /*
1131 : * if try is too large (thus target is less than try) we reduce
1132 : * the length to trunc(len/2) [i.e. everything smaller than "try"]
1133 : *
1134 : * if the try is too small (thus target is greater than try) then
1135 : * we set the new start to be (try + 1). this means we need to
1136 : * reduce the length to (round(len/2) - 1).
1137 : *
1138 : * note "adjust" below which takes advantage of the fact that
1139 : * (round(len/2) - 1) == trunc((len - 1) / 2)
1140 : * for any value of len we may have
1141 : */
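/*
 * worked example of the "adjust": with len = 5 and the target above
 * try, two entries survive past try; len-- gives 4 and the loop's
 * len / 2 gives 2, matching trunc((5 - 1) / 2) == round(5 / 2) - 1 == 2.
 */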
1142 :
1143 : for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
1144 : try = start + (len / 2); /* try in the middle */
1145 : seg = vm_physmem + try;
1146 :
1147 : /* start past our try? */
1148 : if (pframe >= seg->start) {
1149 : /* was try correct? */
1150 : if (pframe < seg->end) {
1151 : if (offp)
1152 : *offp = pframe - seg->start;
1153 : return(try); /* got it */
1154 : }
1155 : start = try + 1; /* next time, start here */
1156 : len--; /* "adjust" */
1157 : } else {
1158 : /*
1159 : * pframe before try, just reduce length of
1160 : * region, done in "for" loop
1161 : */
1162 : }
1163 : }
1164 : return(-1);
1165 :
1166 : #else
1167 : /* linear search for it */
1168 : int lcv;
1169 :
1170 180 : for (lcv = 0, seg = vm_physmem; lcv < vm_nphysseg ; lcv++, seg++) {
1171 0 : if (pframe >= seg->start && pframe < seg->end) {
1172 0 : if (offp)
1173 180 : *offp = pframe - seg->start;
1174 0 : return(lcv); /* got it */
1175 : }
1176 : }
1177 0 : return(-1);
1178 :
1179 : #endif
1180 0 : }
1181 :
1182 : /*
1183 : * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
1184 : * back from an I/O mapping (ugh!). used in some MD code as well.
1185 : */
1186 : struct vm_page *
1187 0 : PHYS_TO_VM_PAGE(paddr_t pa)
1188 : {
1189 0 : paddr_t pf = atop(pa);
1190 0 : int off;
1191 : int psi;
1192 :
1193 0 : psi = vm_physseg_find(pf, &off);
1194 :
1195 0 : return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
1196 0 : }
1197 : #endif /* VM_PHYSSEG_MAX > 1 */
1198 :
1199 : /*
1200 : * uvm_pagelookup: look up a page
1201 : */
1202 : struct vm_page *
1203 0 : uvm_pagelookup(struct uvm_object *obj, voff_t off)
1204 : {
1205 : /* XXX if stack is too much, handroll */
1206 0 : struct vm_page pg;
1207 :
1208 0 : pg.offset = off;
1209 0 : return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
1210 0 : }
1211 :
1212 : /*
1213 : * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1214 : *
1215 : * => caller must lock page queues
1216 : */
1217 : void
1218 0 : uvm_pagewire(struct vm_page *pg)
1219 : {
1220 0 : if (pg->wire_count == 0) {
1221 0 : if (pg->pg_flags & PQ_ACTIVE) {
1222 0 : TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1223 0 : atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1224 0 : uvmexp.active--;
1225 0 : }
1226 0 : if (pg->pg_flags & PQ_INACTIVE) {
1227 0 : if (pg->pg_flags & PQ_SWAPBACKED)
1228 0 : TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1229 : else
1230 0 : TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1231 0 : atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1232 0 : uvmexp.inactive--;
1233 0 : }
1234 0 : uvmexp.wired++;
1235 0 : }
1236 0 : pg->wire_count++;
1237 0 : }
1238 :
1239 : /*
1240 : * uvm_pageunwire: unwire the page.
1241 : *
1242 : * => activate if wire count goes to zero.
1243 : * => caller must lock page queues
1244 : */
1245 : void
1246 0 : uvm_pageunwire(struct vm_page *pg)
1247 : {
1248 0 : pg->wire_count--;
1249 0 : if (pg->wire_count == 0) {
1250 0 : TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1251 0 : uvmexp.active++;
1252 0 : atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1253 0 : uvmexp.wired--;
1254 0 : }
1255 0 : }
1256 :
1257 : /*
1258 : * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
1259 : *
1260 : * => caller must lock page queues
1261 : * => caller must check to make sure page is not wired
1262 : * => object that page belongs to must be locked (so we can adjust pg->flags)
1263 : */
1264 : void
1265 0 : uvm_pagedeactivate(struct vm_page *pg)
1266 : {
1267 0 : if (pg->pg_flags & PQ_ACTIVE) {
1268 0 : TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1269 0 : atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1270 0 : uvmexp.active--;
1271 0 : }
1272 0 : if ((pg->pg_flags & PQ_INACTIVE) == 0) {
1273 0 : KASSERT(pg->wire_count == 0);
1274 0 : if (pg->pg_flags & PQ_SWAPBACKED)
1275 0 : TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
1276 : else
1277 0 : TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
1278 0 : atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
1279 0 : uvmexp.inactive++;
1280 0 : pmap_clear_reference(pg);
1281 : /*
1282 : * update the "clean" bit. this isn't 100%
1283 : * accurate, and doesn't have to be. we'll
1284 : * re-sync it after we zap all mappings when
1285 : * scanning the inactive list.
1286 : */
1287 0 : if ((pg->pg_flags & PG_CLEAN) != 0 &&
1288 0 : pmap_is_modified(pg))
1289 0 : atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1290 : }
1291 0 : }
1292 :
1293 : /*
1294 : * uvm_pageactivate: activate page
1295 : *
1296 : * => caller must lock page queues
1297 : */
1298 : void
1299 0 : uvm_pageactivate(struct vm_page *pg)
1300 : {
1301 60 : if (pg->pg_flags & PQ_INACTIVE) {
1302 0 : if (pg->pg_flags & PQ_SWAPBACKED)
1303 0 : TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1304 : else
1305 0 : TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1306 0 : atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1307 0 : uvmexp.inactive--;
1308 0 : }
1309 60 : if (pg->wire_count == 0) {
1310 : /*
1311 : * if page is already active, remove it from list so we
1312 : * can put it at tail. if it wasn't active, then mark
1313 : * it active and bump active count
1314 : */
1315 0 : if (pg->pg_flags & PQ_ACTIVE)
1316 60 : TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1317 : else {
1318 0 : atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1319 0 : uvmexp.active++;
1320 : }
1321 :
1322 0 : TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1323 0 : }
1324 0 : }
1325 :
1326 : /*
1327 : * uvm_pagezero: zero fill a page
1328 : */
1329 : void
1330 0 : uvm_pagezero(struct vm_page *pg)
1331 : {
1332 0 : atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1333 0 : pmap_zero_page(pg);
1334 0 : }
1335 :
1336 : /*
1337 : * uvm_pagecopy: copy a page
1338 : */
1339 : void
1340 0 : uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1341 : {
1342 0 : atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
1343 0 : pmap_copy_page(src, dst);
1344 0 : }
1345 :
1346 : /*
1347 : * uvm_pagecount: count the number of physical pages in the address range.
1348 : */
1349 : psize_t
1350 0 : uvm_pagecount(struct uvm_constraint_range* constraint)
1351 : {
1352 : int lcv;
1353 : psize_t sz;
1354 : paddr_t low, high;
1355 : paddr_t ps_low, ps_high;
1356 :
1357 : /* Algorithm uses page numbers. */
1358 0 : low = atop(constraint->ucr_low);
1359 0 : high = atop(constraint->ucr_high);
1360 :
1361 : sz = 0;
1362 0 : for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1363 0 : ps_low = MAX(low, vm_physmem[lcv].avail_start);
1364 0 : ps_high = MIN(high, vm_physmem[lcv].avail_end);
1365 0 : if (ps_low < ps_high)
1366 0 : sz += ps_high - ps_low;
1367 : }
1368 0 : return sz;
1369 : }
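/*
 * worked example (illustrative numbers): a constraint with ucr_low at
 * 16MB and ucr_high at 4GB, checked against a single segment whose
 * avail pages span physical [0, 2GB), overlaps in [16MB, 2GB), so
 * sz = atop(2GB) - atop(16MB) pages.
 */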
|