LCOV - code coverage report
Current view: top level - uvm - uvm_pager.c (source / functions)
Test: 6.4
Date: 2018-10-19 03:25:38

               Hit    Total    Coverage
  Lines:         0      234       0.0 %
  Functions:     0       11       0.0 %

Legend: Lines: hit | not hit

          Line data    Source code
       1             : /*      $OpenBSD: uvm_pager.c,v 1.71 2014/12/17 19:42:15 tedu Exp $     */
       2             : /*      $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $       */
       3             : 
       4             : /*
       5             :  * Copyright (c) 1997 Charles D. Cranor and Washington University.
       6             :  * All rights reserved.
       7             :  *
       8             :  * Redistribution and use in source and binary forms, with or without
       9             :  * modification, are permitted provided that the following conditions
      10             :  * are met:
      11             :  * 1. Redistributions of source code must retain the above copyright
      12             :  *    notice, this list of conditions and the following disclaimer.
      13             :  * 2. Redistributions in binary form must reproduce the above copyright
      14             :  *    notice, this list of conditions and the following disclaimer in the
      15             :  *    documentation and/or other materials provided with the distribution.
      16             :  *
      17             :  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
      18             :  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
      19             :  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
      20             :  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
      21             :  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      22             :  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      23             :  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      24             :  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      25             :  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
      26             :  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      27             :  *
      28             :  * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
      29             :  */
      30             : 
      31             : /*
      32             :  * uvm_pager.c: generic functions used to assist the pagers.
      33             :  */
      34             : 
      35             : #include <sys/param.h>
      36             : #include <sys/systm.h>
      37             : #include <sys/malloc.h>
      38             : #include <sys/pool.h>
      39             : #include <sys/buf.h>
      40             : #include <sys/atomic.h>
      41             : 
      42             : #include <uvm/uvm.h>
      43             : 
      44             : struct pool *uvm_aiobuf_pool;
      45             : 
      46             : struct uvm_pagerops *uvmpagerops[] = {
      47             :         &aobj_pager,
      48             :         &uvm_deviceops,
      49             :         &uvm_vnodeops,
      50             : };
      51             : 
      52             : /*
      53             :  * the pager map: provides KVA for I/O
      54             :  *
      55             :  * Each uvm_pseg has room for MAX_PAGER_SEGS segments of pager I/O
      56             :  * space, each MAXBSIZE bytes long.
      57             :  *
      58             :  * The set of uvm_pseg instances is dynamic; they live in the array
      59             :  * psegs, of which at most PSEG_NUMSEGS can exist.
      60             :  *
      61             :  * psegs[0] always exists (so that the pager can always map in pages).
      62             :  * psegs[0] element 0 is always reserved for the pagedaemon.
      63             :  *
      64             :  * Any other pseg is automatically created when no space is available
      65             :  * and automatically destroyed when it is no longer in use.
      66             :  */
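
The arithmetic behind the "use" bitmap and the segment/KVA mapping can be
exercised in isolation.  A minimal userland sketch, assuming MAXBSIZE is
64 KB (its usual OpenBSD value) and a made-up segment base address:

        #include <assert.h>
        #include <stdio.h>

        #define MAX_PAGER_SEGS  16
        #define MAXBSIZE        (64 * 1024)     /* assumption: 64 KB */

        int
        main(void)
        {
                unsigned long start = 0xd0000000UL; /* hypothetical pseg->start */
                int use = 0;                    /* bitmap of segments in use */
                int id;

                /* find and claim the first free segment, as uvm_pseg_get() does */
                for (id = 0; id < MAX_PAGER_SEGS; id++)
                        if ((use & (1 << id)) == 0)
                                break;
                use |= 1 << id;

                /* the KVA handed out is start + id * MAXBSIZE ... */
                unsigned long va = start + (unsigned long)id * MAXBSIZE;

                /* ... and uvm_pseg_release() recovers id from the address */
                assert((int)((va - start) / MAXBSIZE) == id);

                use &= ~(1 << id);              /* release the segment */
                printf("segment %d -> va %#lx, bitmap now %#x\n",
                    id, va, (unsigned)use);
                return 0;
        }
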
      67             : #define MAX_PAGER_SEGS  16
      68             : #define PSEG_NUMSEGS    (PAGER_MAP_SIZE / MAX_PAGER_SEGS / MAXBSIZE)
      69             : struct uvm_pseg {
      70             :         /* Start of virtual space; 0 if not inited. */
      71             :         vaddr_t start;
      72             :         /* Bitmap of the segments in use in this pseg. */
      73             :         int     use;
      74             : };
      75             : struct  mutex uvm_pseg_lck;
      76             : struct  uvm_pseg psegs[PSEG_NUMSEGS];
      77             : 
      78             : #define UVM_PSEG_FULL(pseg)     ((pseg)->use == (1 << MAX_PAGER_SEGS) - 1)
      79             : #define UVM_PSEG_EMPTY(pseg)    ((pseg)->use == 0)
      80             : #define UVM_PSEG_INUSE(pseg,id) (((pseg)->use & (1 << (id))) != 0)
      81             : 
      82             : void            uvm_pseg_init(struct uvm_pseg *);
      83             : vaddr_t         uvm_pseg_get(int);
      84             : void            uvm_pseg_release(vaddr_t);
      85             : 
      86             : /*
      87             :  * uvm_pager_init: init pagers (at boot time)
      88             :  */
      89             : void
      90           0 : uvm_pager_init(void)
      91             : {
      92             :         int lcv;
      93             : 
      94             :         /* init pager map */
      95           0 :         uvm_pseg_init(&psegs[0]);
      96           0 :         mtx_init(&uvm_pseg_lck, IPL_VM);
      97             : 
      98             :         /* init ASYNC I/O queue */
      99           0 :         TAILQ_INIT(&uvm.aio_done);
     100             : 
     101             :         /* call pager init functions */
     102           0 :         for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
     103           0 :             lcv++) {
     104           0 :                 if (uvmpagerops[lcv]->pgo_init)
     105           0 :                         uvmpagerops[lcv]->pgo_init();
     106             :         }
     107           0 : }
     108             : 
     109             : /*
     110             :  * Initialize a uvm_pseg.
     111             :  *
      112             :  * May fail, in which case pseg->start == 0.
     113             :  *
     114             :  * Caller locks uvm_pseg_lck.
     115             :  */
     116             : void
     117           0 : uvm_pseg_init(struct uvm_pseg *pseg)
     118             : {
     119           0 :         KASSERT(pseg->start == 0);
     120           0 :         KASSERT(pseg->use == 0);
     121           0 :         pseg->start = uvm_km_valloc_try(kernel_map, MAX_PAGER_SEGS * MAXBSIZE);
     122           0 : }
     123             : 
     124             : /*
     125             :  * Acquire a pager map segment.
     126             :  *
     127             :  * Returns a vaddr for paging. 0 on failure.
     128             :  *
     129             :  * Caller does not lock.
     130             :  */
     131             : vaddr_t
     132           0 : uvm_pseg_get(int flags)
     133             : {
     134             :         int i;
     135             :         struct uvm_pseg *pseg;
     136             : 
     137           0 :         mtx_enter(&uvm_pseg_lck);
     138             : 
     139             : pager_seg_restart:
     140             :         /* Find first pseg that has room. */
     141           0 :         for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
     142           0 :                 if (UVM_PSEG_FULL(pseg))
     143             :                         continue;
     144             : 
     145           0 :                 if (pseg->start == 0) {
     146             :                         /* Need initialization. */
     147           0 :                         uvm_pseg_init(pseg);
     148           0 :                         if (pseg->start == 0)
     149             :                                 goto pager_seg_fail;
     150             :                 }
     151             : 
     152             :                 /* Keep index 0 reserved for pagedaemon. */
     153           0 :                 if (pseg == &psegs[0] && curproc != uvm.pagedaemon_proc)
     154           0 :                         i = 1;
     155             :                 else
     156             :                         i = 0;
     157             : 
     158           0 :                 for (; i < MAX_PAGER_SEGS; i++) {
     159           0 :                         if (!UVM_PSEG_INUSE(pseg, i)) {
     160           0 :                                 pseg->use |= 1 << i;
     161           0 :                                 mtx_leave(&uvm_pseg_lck);
     162           0 :                                 return pseg->start + i * MAXBSIZE;
     163             :                         }
     164             :                 }
     165             :         }
     166             : 
     167             : pager_seg_fail:
     168           0 :         if ((flags & UVMPAGER_MAPIN_WAITOK) != 0) {
     169           0 :                 msleep(&psegs, &uvm_pseg_lck, PVM, "pagerseg", 0);
     170           0 :                 goto pager_seg_restart;
     171             :         }
     172             : 
     173           0 :         mtx_leave(&uvm_pseg_lck);
     174           0 :         return 0;
     175           0 : }
     176             : 
     177             : /*
     178             :  * Release a pager map segment.
     179             :  *
     180             :  * Caller does not lock.
     181             :  *
     182             :  * Deallocates pseg if it is no longer in use.
     183             :  */
     184             : void
     185           0 : uvm_pseg_release(vaddr_t segaddr)
     186             : {
     187             :         int id;
     188             :         struct uvm_pseg *pseg;
     189             :         vaddr_t va = 0;
     190             : 
     191           0 :         for (pseg = &psegs[0]; pseg != &psegs[PSEG_NUMSEGS]; pseg++) {
     192           0 :                 if (pseg->start <= segaddr &&
     193           0 :                     segaddr < pseg->start + MAX_PAGER_SEGS * MAXBSIZE)
     194             :                         break;
     195             :         }
     196           0 :         KASSERT(pseg != &psegs[PSEG_NUMSEGS]);
     197             : 
     198           0 :         id = (segaddr - pseg->start) / MAXBSIZE;
     199           0 :         KASSERT(id >= 0 && id < MAX_PAGER_SEGS);
     200             : 
     201             :         /* test for no remainder */
     202             :         KDASSERT(segaddr == pseg->start + id * MAXBSIZE);
     203             : 
     204           0 :         mtx_enter(&uvm_pseg_lck);
     205             : 
     206           0 :         KASSERT(UVM_PSEG_INUSE(pseg, id));
     207             : 
     208           0 :         pseg->use &= ~(1 << id);
     209           0 :         wakeup(&psegs);
     210             : 
     211           0 :         if (pseg != &psegs[0] && UVM_PSEG_EMPTY(pseg)) {
     212           0 :                 va = pseg->start;
     213           0 :                 pseg->start = 0;
     214           0 :         }
     215             : 
     216           0 :         mtx_leave(&uvm_pseg_lck);
     217             : 
     218           0 :         if (va)
     219           0 :                 uvm_km_free(kernel_map, va, MAX_PAGER_SEGS * MAXBSIZE);
     220           0 : }
     221             : 
     222             : /*
     223             :  * uvm_pagermapin: map pages into KVA for I/O that needs mappings
     224             :  *
      225             :  * We grab a free pager map segment (uvm_pseg_get) to reserve KVA in the
      226             :  * kernel map and then use pmap_enter() to put the mappings in by hand.
     227             :  */
     228             : vaddr_t
     229           0 : uvm_pagermapin(struct vm_page **pps, int npages, int flags)
     230             : {
     231             :         vaddr_t kva, cva;
     232             :         vm_prot_t prot;
     233             :         vsize_t size;
     234             :         struct vm_page *pp;
     235             : 
     236             :         prot = PROT_READ;
     237           0 :         if (flags & UVMPAGER_MAPIN_READ)
     238           0 :                 prot |= PROT_WRITE;
     239           0 :         size = ptoa(npages);
     240             : 
     241           0 :         KASSERT(size <= MAXBSIZE);
     242             : 
     243           0 :         kva = uvm_pseg_get(flags);
     244           0 :         if (kva == 0)
     245           0 :                 return 0;
     246             : 
     247           0 :         for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
     248           0 :                 pp = *pps++;
     249           0 :                 KASSERT(pp);
     250           0 :                 KASSERT(pp->pg_flags & PG_BUSY);
     251             :                 /* Allow pmap_enter to fail. */
     252           0 :                 if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
     253           0 :                     prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
     254           0 :                         pmap_remove(pmap_kernel(), kva, cva);
     255             :                         pmap_update(pmap_kernel());
     256           0 :                         uvm_pseg_release(kva);
     257           0 :                         return 0;
     258             :                 }
     259             :         }
     260             :         pmap_update(pmap_kernel());
     261           0 :         return kva;
     262           0 : }
     263             : 
     264             : /*
     265             :  * uvm_pagermapout: remove KVA mapping
     266             :  *
      267             :  * We remove the mappings by hand and release the KVA back to the pager map.
     268             :  */
     269             : void
     270           0 : uvm_pagermapout(vaddr_t kva, int npages)
     271             : {
     272             : 
     273           0 :         pmap_remove(pmap_kernel(), kva, kva + ((vsize_t)npages << PAGE_SHIFT));
     274             :         pmap_update(pmap_kernel());
     275           0 :         uvm_pseg_release(kva);
     276             : 
     277           0 : }
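
Taken together, uvm_pagermapin() and uvm_pagermapout() bracket the device
I/O a pager performs.  A hedged sketch of the calling pattern, where
do_device_write() is a hypothetical stand-in for the pager's real I/O
routine and pps holds npages busy pages owned by the caller:

        vaddr_t kva;

        kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WRITE);
        if (kva == 0)
                return (VM_PAGER_AGAIN); /* no pager KVA, or pmap_enter failed */
        do_device_write(kva, ptoa(npages));     /* hypothetical I/O routine */
        uvm_pagermapout(kva, npages);

Passing UVMPAGER_MAPIN_WAITOK instead would sleep in uvm_pseg_get() until
a segment frees up rather than failing with 0.
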
     278             : 
     279             : /*
     280             :  * uvm_mk_pcluster
     281             :  *
     282             :  * generic "make 'pager put' cluster" function.  a pager can either
     283             :  * [1] set pgo_mk_pcluster to NULL (never cluster), [2] set it to this
     284             :  * generic function, or [3] set it to a pager specific function.
     285             :  *
     286             :  * => caller must lock object _and_ pagequeues (since we need to look
     287             :  *    at active vs. inactive bits, etc.)
     288             :  * => caller must make center page busy and write-protect it
     289             :  * => we mark all cluster pages busy for the caller
     290             :  * => the caller must unbusy all pages (and check wanted/released
     291             :  *    status if it drops the object lock)
     292             :  * => flags:
     293             :  *      PGO_ALLPAGES:  all pages in object are valid targets
     294             :  *      !PGO_ALLPAGES: use "lo" and "hi" to limit range of cluster
     295             :  *      PGO_DOACTCLUST: include active pages in cluster.
     296             :  *      PGO_FREE: set the PG_RELEASED bits on the cluster so they'll be freed
     297             :  *              in async io (caller must clean on error).
     298             :  *        NOTE: the caller should clear PG_CLEANCHK bits if PGO_DOACTCLUST.
     299             :  *              PG_CLEANCHK is only a hint, but clearing will help reduce
     300             :  *              the number of calls we make to the pmap layer.
     301             :  */
     302             : 
     303             : struct vm_page **
     304           0 : uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
     305             :     struct vm_page *center, int flags, voff_t mlo, voff_t mhi)
     306             : {
     307             :         struct vm_page **ppsp, *pclust;
     308           0 :         voff_t lo, hi, curoff;
     309             :         int center_idx, forward, incr;
     310             : 
     311             :         /* 
     312             :          * center page should already be busy and write protected.  XXX:
     313             :          * suppose page is wired?  if we lock, then a process could
     314             :          * fault/block on it.  if we don't lock, a process could write the
     315             :          * pages in the middle of an I/O.  (consider an msync()).  let's
     316             :          * lock it for now (better to delay than corrupt data?).
     317             :          */
      318             :         /* get cluster boundaries, check sanity, and apply our limits as well. */
     319           0 :         uobj->pgops->pgo_cluster(uobj, center->offset, &lo, &hi);
     320           0 :         if ((flags & PGO_ALLPAGES) == 0) {
     321           0 :                 if (lo < mlo)
     322           0 :                         lo = mlo;
     323           0 :                 if (hi > mhi)
     324           0 :                         hi = mhi;
     325             :         }
     326           0 :         if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
     327           0 :                 pps[0] = center;
     328           0 :                 *npages = 1;
     329           0 :                 return(pps);
     330             :         }
     331             : 
     332             :         /* now determine the center and attempt to cluster around the edges */
     333           0 :         center_idx = (center->offset - lo) >> PAGE_SHIFT;
     334           0 :         pps[center_idx] = center;       /* plug in the center page */
     335             :         ppsp = &pps[center_idx];
     336           0 :         *npages = 1;
     337             : 
     338             :         /*
     339             :          * attempt to cluster around the left [backward], and then 
     340             :          * the right side [forward].    
     341             :          *
     342             :          * note that for inactive pages (pages that have been deactivated)
     343             :          * there are no valid mappings and PG_CLEAN should be up to date.
     344             :          * [i.e. there is no need to query the pmap with pmap_is_modified
     345             :          * since there are no mappings].
     346             :          */
     347           0 :         for (forward  = 0 ; forward <= 1 ; forward++) {
     348           0 :                 incr = forward ? PAGE_SIZE : -PAGE_SIZE;
     349           0 :                 curoff = center->offset + incr;
     350           0 :                 for ( ;(forward == 0 && curoff >= lo) ||
     351           0 :                        (forward && curoff < hi);
     352           0 :                       curoff += incr) {
     353             : 
     354           0 :                         pclust = uvm_pagelookup(uobj, curoff); /* lookup page */
     355           0 :                         if (pclust == NULL) {
     356             :                                 break;                  /* no page */
     357             :                         }
     358             :                         /* handle active pages */
     359             :                         /* NOTE: inactive pages don't have pmap mappings */
     360           0 :                         if ((pclust->pg_flags & PQ_INACTIVE) == 0) {
     361           0 :                                 if ((flags & PGO_DOACTCLUST) == 0) {
      362             :                                         /* don't want mapped pages at all */
     363             :                                         break;
     364             :                                 }
     365             : 
     366             :                                 /* make sure "clean" bit is sync'd */
     367           0 :                                 if ((pclust->pg_flags & PG_CLEANCHK) == 0) {
     368           0 :                                         if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY))
     369           0 :                                            == PG_CLEAN &&
     370           0 :                                            pmap_is_modified(pclust))
     371           0 :                                                 atomic_clearbits_int(
     372             :                                                     &pclust->pg_flags,
     373             :                                                     PG_CLEAN);
     374             :                                         /* now checked */
     375           0 :                                         atomic_setbits_int(&pclust->pg_flags,
     376             :                                             PG_CLEANCHK);
     377           0 :                                 }
     378             :                         }
     379             : 
     380             :                         /* is page available for cleaning and does it need it */
     381           0 :                         if ((pclust->pg_flags & (PG_CLEAN|PG_BUSY)) != 0) {
     382             :                                 break;  /* page is already clean or is busy */
     383             :                         }
     384             : 
     385             :                         /* yes!   enroll the page in our array */
     386           0 :                         atomic_setbits_int(&pclust->pg_flags, PG_BUSY);
     387             :                         UVM_PAGE_OWN(pclust, "uvm_mk_pcluster");
     388             : 
     389             :                         /*
     390             :                          * If we want to free after io is done, and we're
     391             :                          * async, set the released flag
     392             :                          */
     393           0 :                         if ((flags & (PGO_FREE|PGO_SYNCIO)) == PGO_FREE)
     394           0 :                                 atomic_setbits_int(&pclust->pg_flags,
     395             :                                     PG_RELEASED);
     396             : 
     397             :                         /* XXX: protect wired page?   see above comment. */
     398           0 :                         pmap_page_protect(pclust, PROT_READ);
     399           0 :                         if (!forward) {
     400           0 :                                 ppsp--;                 /* back up one page */
     401           0 :                                 *ppsp = pclust;
     402           0 :                         } else {
     403             :                                 /* move forward one page */
     404           0 :                                 ppsp[*npages] = pclust;
     405             :                         }
     406           0 :                         (*npages)++;
     407             :                 }
     408             :         }
     409             :         
     410             :         /*
     411             :          * done!  return the cluster array to the caller!!!
     412             :          */
     413           0 :         return(ppsp);
     414           0 : }
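
A worked example of the indexing above, assuming 4 KB pages (PAGE_SHIFT =
12): with lo = 0x4000 and center->offset = 0xA000, center_idx = (0xA000 -
0x4000) >> 12 = 6, so the center page is planted at pps[6]; the backward
pass then fills pps[5], pps[4], ... and the forward pass pps[7], pps[8],
..., with *npages counting every page enrolled.  ppsp is returned pointing
at the lowest enrolled slot, not necessarily at pps[0].
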
     415             : 
     416             : /*
     417             :  * uvm_pager_put: high level pageout routine
     418             :  *
     419             :  * we want to pageout page "pg" to backing store, clustering if
     420             :  * possible.
     421             :  *
     422             :  * => page queues must be locked by caller
     423             :  * => if page is not swap-backed, then "uobj" points to the object
     424             :  *      backing it.
     425             :  * => if page is swap-backed, then "uobj" should be NULL.
      426             :  * => "pg" should be PG_BUSY (by caller), and !PG_CLEAN.
     427             :  *    for swap-backed memory, "pg" can be NULL if there is no page
     428             :  *    of interest [sometimes the case for the pagedaemon]
     429             :  * => "ppsp_ptr" should point to an array of npages vm_page pointers
     430             :  *      for possible cluster building
     431             :  * => flags (first two for non-swap-backed pages)
     432             :  *      PGO_ALLPAGES: all pages in uobj are valid targets
     433             :  *      PGO_DOACTCLUST: include "PQ_ACTIVE" pages as valid targets
     434             :  *      PGO_SYNCIO: do SYNC I/O (no async)
     435             :  *      PGO_PDFREECLUST: pagedaemon: drop cluster on successful I/O
     436             :  *      PGO_FREE: tell the aio daemon to free pages in the async case.
     437             :  * => start/stop: if (uobj && !PGO_ALLPAGES) limit targets to this range
     438             :  *                if (!uobj) start is the (daddr_t) of the starting swapblk
     439             :  * => return state:
     440             :  *      1. we return the VM_PAGER status code of the pageout
     441             :  *      2. we return with the page queues unlocked
     442             :  *      3. on errors we always drop the cluster.   thus, if we return
     443             :  *              !PEND, !OK, then the caller only has to worry about
     444             :  *              un-busying the main page (not the cluster pages).
     445             :  *      4. on success, if !PGO_PDFREECLUST, we return the cluster
     446             :  *              with all pages busy (caller must un-busy and check
     447             :  *              wanted/released flags).
     448             :  */
     449             : int
     450           0 : uvm_pager_put(struct uvm_object *uobj, struct vm_page *pg,
     451             :     struct vm_page ***ppsp_ptr, int *npages, int flags,
     452             :     voff_t start, voff_t stop)
     453             : {
     454             :         int result;
     455             :         daddr_t swblk;
     456           0 :         struct vm_page **ppsp = *ppsp_ptr;
     457             : 
     458             :         /*
     459             :          * note that uobj is null  if we are doing a swap-backed pageout.
     460             :          * note that uobj is !null if we are doing normal object pageout.
     461             :          * note that the page queues must be locked to cluster.
     462             :          */
     463           0 :         if (uobj) {     /* if !swap-backed */
     464             :                 /*
     465             :                  * attempt to build a cluster for pageout using its
     466             :                  * make-put-cluster function (if it has one).
     467             :                  */
     468           0 :                 if (uobj->pgops->pgo_mk_pcluster) {
     469           0 :                         ppsp = uobj->pgops->pgo_mk_pcluster(uobj, ppsp,
     470             :                             npages, pg, flags, start, stop);
     471           0 :                         *ppsp_ptr = ppsp;  /* update caller's pointer */
     472           0 :                 } else {
     473           0 :                         ppsp[0] = pg;
     474           0 :                         *npages = 1;
     475             :                 }
     476             : 
     477             :                 swblk = 0;              /* XXX: keep gcc happy */
     478           0 :         } else {
     479             :                 /*
     480             :                  * for swap-backed pageout, the caller (the pagedaemon) has
     481             :                  * already built the cluster for us.   the starting swap
     482             :                  * block we are writing to has been passed in as "start."
     483             :                  * "pg" could be NULL if there is no page we are especially
     484             :                  * interested in (in which case the whole cluster gets dropped
     485             :                  * in the event of an error or a sync "done").
     486             :                  */
     487             :                 swblk = start;
     488             :                 /* ppsp and npages should be ok */
     489             :         }
     490             : 
     491             :         /* now that we've clustered we can unlock the page queues */
     492           0 :         uvm_unlock_pageq();
     493             : 
     494             :         /*
     495             :          * now attempt the I/O.   if we have a failure and we are
     496             :          * clustered, we will drop the cluster and try again.
     497             :          */
     498             : ReTry:
     499           0 :         if (uobj) {
     500           0 :                 result = uobj->pgops->pgo_put(uobj, ppsp, *npages, flags);
     501           0 :         } else {
     502             :                 /* XXX daddr_t -> int */
     503           0 :                 result = uvm_swap_put(swblk, ppsp, *npages, flags);
     504             :         }
     505             : 
     506             :         /*
     507             :          * we have attempted the I/O.
     508             :          *
     509             :          * if the I/O was a success then:
     510             :          *      if !PGO_PDFREECLUST, we return the cluster to the 
     511             :          *              caller (who must un-busy all pages)
     512             :          *      else we un-busy cluster pages for the pagedaemon
     513             :          *
     514             :          * if I/O is pending (async i/o) then we return the pending code.
     515             :          * [in this case the async i/o done function must clean up when
     516             :          *  i/o is done...]
     517             :          */
     518           0 :         if (result == VM_PAGER_PEND || result == VM_PAGER_OK) {
     519           0 :                 if (result == VM_PAGER_OK && (flags & PGO_PDFREECLUST)) {
     520             :                         /* drop cluster */
     521           0 :                         if (*npages > 1 || pg == NULL)
     522           0 :                                 uvm_pager_dropcluster(uobj, pg, ppsp, npages,
     523             :                                     PGO_PDFREECLUST);
     524             :                 }
     525           0 :                 return (result);
     526             :         }
     527             : 
     528             :         /*
      529             :  * a pager error occurred (even after dropping the cluster, if there
     530             :          * was one).  give up! the caller only has one page ("pg")
     531             :          * to worry about.
     532             :          */
     533           0 :         if (*npages > 1 || pg == NULL) {
     534           0 :                 uvm_pager_dropcluster(uobj, pg, ppsp, npages, PGO_REALLOCSWAP);
     535             : 
     536             :                 /*
     537             :                  * for failed swap-backed pageouts with a "pg",
     538             :                  * we need to reset pg's swslot to either:
     539             :                  * "swblk" (for transient errors, so we can retry),
     540             :                  * or 0 (for hard errors).
     541             :                  */
     542           0 :                 if (uobj == NULL && pg != NULL) {
     543             :                         /* XXX daddr_t -> int */
     544           0 :                         int nswblk = (result == VM_PAGER_AGAIN) ? swblk : 0;
     545           0 :                         if (pg->pg_flags & PQ_ANON) {
     546           0 :                                 pg->uanon->an_swslot = nswblk;
     547           0 :                         } else {
     548           0 :                                 uao_set_swslot(pg->uobject,
     549           0 :                                                pg->offset >> PAGE_SHIFT,
     550             :                                                nswblk);
     551             :                         }
     552           0 :                 }
     553           0 :                 if (result == VM_PAGER_AGAIN) {
     554             :                         /*
     555             :                          * for transient failures, free all the swslots that
     556             :                          * we're not going to retry with.
     557             :                          */
     558           0 :                         if (uobj == NULL) {
     559           0 :                                 if (pg) {
     560             :                                         /* XXX daddr_t -> int */
     561           0 :                                         uvm_swap_free(swblk + 1, *npages - 1);
     562           0 :                                 } else {
     563             :                                         /* XXX daddr_t -> int */
     564           0 :                                         uvm_swap_free(swblk, *npages);
     565             :                                 }
     566             :                         }
     567           0 :                         if (pg) {
     568           0 :                                 ppsp[0] = pg;
     569           0 :                                 *npages = 1;
     570           0 :                                 goto ReTry;
     571             :                         }
     572           0 :                 } else if (uobj == NULL) {
     573             :                         /*
     574             :                          * for hard errors on swap-backed pageouts,
     575             :                          * mark the swslots as bad.  note that we do not
     576             :                          * free swslots that we mark bad.
     577             :                          */
     578             :                         /* XXX daddr_t -> int */
     579           0 :                         uvm_swap_markbad(swblk, *npages);
     580           0 :                 }
     581             :         }
     582             : 
     589           0 :         return(result);
     590           0 : }
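
A hedged sketch of how an object pageout might invoke this routine; uobj,
pg, lo and hi are assumed to come from the caller, and pages[] simply
provides scratch room for clustering:

        struct vm_page *pages[MAXBSIZE >> PAGE_SHIFT];
        struct vm_page **ppsp = pages;
        int npages = MAXBSIZE >> PAGE_SHIFT;
        int result;

        uvm_lock_pageq();               /* clustering needs the page queues */
        result = uvm_pager_put(uobj, pg, &ppsp, &npages,
            PGO_DOACTCLUST | PGO_SYNCIO, lo, hi);
        /*
         * the page queues are unlocked now.  on success the cluster comes
         * back busy in ppsp[0..npages-1]; on failure only pg needs care.
         */
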
     591             : 
     592             : /*
     593             :  * uvm_pager_dropcluster: drop a cluster we have built (because we 
     594             :  * got an error, or, if PGO_PDFREECLUST we are un-busying the
     595             :  * cluster pages on behalf of the pagedaemon).
     596             :  *
     597             :  * => uobj, if non-null, is a non-swap-backed object
     598             :  * => page queues are not locked
     599             :  * => pg is our page of interest (the one we clustered around, can be null)
     600             :  * => ppsp/npages is our current cluster
     601             :  * => flags: PGO_PDFREECLUST: pageout was a success: un-busy cluster
     602             :  *      pages on behalf of the pagedaemon.
     603             :  *           PGO_REALLOCSWAP: drop previously allocated swap slots for 
     604             :  *              clustered swap-backed pages (except for "pg" if !NULL)
     605             :  *              "swblk" is the start of swap alloc (e.g. for ppsp[0])
     606             :  *              [only meaningful if swap-backed (uobj == NULL)]
     607             :  */
     608             : 
     609             : void
     610           0 : uvm_pager_dropcluster(struct uvm_object *uobj, struct vm_page *pg,
     611             :     struct vm_page **ppsp, int *npages, int flags)
     612             : {
     613             :         int lcv;
     614             : 
     615             :         /* drop all pages but "pg" */
     616           0 :         for (lcv = 0 ; lcv < *npages ; lcv++) {
     617             :                 /* skip "pg" or empty slot */
     618           0 :                 if (ppsp[lcv] == pg || ppsp[lcv] == NULL)
     619             :                         continue;
     620             :         
     621             :                 /*
     622             :                  * Note that PQ_ANON bit can't change as long as we are holding
     623             :                  * the PG_BUSY bit (so there is no need to lock the page
     624             :                  * queues to test it).
     625             :                  */
     626           0 :                 if (!uobj) {
     627           0 :                         if (ppsp[lcv]->pg_flags & PQ_ANON) {
     628           0 :                                 if (flags & PGO_REALLOCSWAP)
     629             :                                           /* zap swap block */
     630           0 :                                           ppsp[lcv]->uanon->an_swslot = 0;
     631             :                         } else {
     632           0 :                                 if (flags & PGO_REALLOCSWAP)
     633           0 :                                         uao_set_swslot(ppsp[lcv]->uobject,
     634           0 :                                             ppsp[lcv]->offset >> PAGE_SHIFT, 0);
     635             :                         }
     636             :                 }
     637             : 
     638             :                 /* did someone want the page while we had it busy-locked? */
     639           0 :                 if (ppsp[lcv]->pg_flags & PG_WANTED) {
     640           0 :                         wakeup(ppsp[lcv]);
     641           0 :                 }
     642             : 
     643             :                 /* if page was released, release it.  otherwise un-busy it */
     644           0 :                 if (ppsp[lcv]->pg_flags & PG_RELEASED &&
     645           0 :                     ppsp[lcv]->pg_flags & PQ_ANON) {
      646             :                         /* so that anfree will free */
      647           0 :                         atomic_clearbits_int(&ppsp[lcv]->pg_flags,
      648             :                             PG_BUSY);
      649             :                         UVM_PAGE_OWN(ppsp[lcv], NULL);
      650             : 
      651             :                         /* kills anon and frees pg */
      652           0 :                         uvm_anfree(ppsp[lcv]->uanon);
      653             : 
      654           0 :                         continue;
     655             :                 } else {
     656             :                         /*
     657             :                          * if we were planning on async io then we would
     658             :                          * have PG_RELEASED set, clear that with the others.
     659             :                          */
     660           0 :                         atomic_clearbits_int(&ppsp[lcv]->pg_flags,
     661             :                             PG_BUSY|PG_WANTED|PG_FAKE|PG_RELEASED);
     662             :                         UVM_PAGE_OWN(ppsp[lcv], NULL);
     663             :                 }
     664             : 
     665             :                 /*
      666             :          * if we are operating on behalf of the pagedaemon and we
      667             :          * had a successful pageout, update the page!
     668             :                  */
     669           0 :                 if (flags & PGO_PDFREECLUST) {
     670           0 :                         pmap_clear_reference(ppsp[lcv]);
     671           0 :                         pmap_clear_modify(ppsp[lcv]);
     672           0 :                         atomic_setbits_int(&ppsp[lcv]->pg_flags, PG_CLEAN);
     673           0 :                 }
     674             :         }
     675           0 : }
     676             : 
     677             : /*
     678             :  * interrupt-context iodone handler for single-buf i/os
     679             :  * or the top-level buf of a nested-buf i/o.
     680             :  *
     681             :  * => must be at splbio().
     682             :  */
     683             : 
     684             : void
     685           0 : uvm_aio_biodone(struct buf *bp)
     686             : {
     687           0 :         splassert(IPL_BIO);
     688             : 
     689             :         /* reset b_iodone for when this is a single-buf i/o. */
     690           0 :         bp->b_iodone = uvm_aio_aiodone;
     691             : 
     692           0 :         mtx_enter(&uvm.aiodoned_lock);
     693           0 :         TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
     694           0 :         wakeup(&uvm.aiodoned);
     695           0 :         mtx_leave(&uvm.aiodoned_lock);
     696           0 : }
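
The buf queued here is consumed in thread context.  A simplified sketch of
the consumer loop (in OpenBSD this work is done by the aiodoned kernel
thread in uvm_pdaemon.c; the details below are an approximation):

        struct buf *bp;
        int s;

        for (;;) {
                mtx_enter(&uvm.aiodoned_lock);
                while ((bp = TAILQ_FIRST(&uvm.aio_done)) == NULL)
                        msleep(&uvm.aiodoned, &uvm.aiodoned_lock,
                            PVM, "aiodoned", 0);
                TAILQ_REMOVE(&uvm.aio_done, bp, b_freelist);
                mtx_leave(&uvm.aiodoned_lock);

                s = splbio();           /* uvm_aio_aiodone() asserts IPL_BIO */
                (*bp->b_iodone)(bp);    /* typically uvm_aio_aiodone() */
                splx(s);
        }
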
     697             : 
     698             : /*
     699             :  * uvm_aio_aiodone: do iodone processing for async i/os.
     700             :  * this should be called in thread context, not interrupt context.
     701             :  */
     702             : void
     703           0 : uvm_aio_aiodone(struct buf *bp)
     704             : {
     705           0 :         int npages = bp->b_bufsize >> PAGE_SHIFT;
     706           0 :         struct vm_page *pg, *pgs[MAXPHYS >> PAGE_SHIFT];
     707             :         struct uvm_object *uobj;
     708             :         int i, error;
     709             :         boolean_t write, swap;
     710             : 
     711           0 :         KASSERT(npages <= MAXPHYS >> PAGE_SHIFT);
     712           0 :         splassert(IPL_BIO);
     713             : 
     714           0 :         error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
     715           0 :         write = (bp->b_flags & B_READ) == 0;
     716             : 
     717             :         uobj = NULL;
     718           0 :         for (i = 0; i < npages; i++)
     719           0 :                 pgs[i] = uvm_atopg((vaddr_t)bp->b_data +
     720           0 :                     ((vsize_t)i << PAGE_SHIFT));
     721           0 :         uvm_pagermapout((vaddr_t)bp->b_data, npages);
     722             : #ifdef UVM_SWAP_ENCRYPT
     723             :         /*
     724             :          * XXX - assumes that we only get ASYNC writes. used to be above.
     725             :          */
     726           0 :         if (pgs[0]->pg_flags & PQ_ENCRYPT) {
     727           0 :                 uvm_swap_freepages(pgs, npages);
     728           0 :                 goto freed;
     729             :         }
     730             : #endif /* UVM_SWAP_ENCRYPT */
     731           0 :         for (i = 0; i < npages; i++) {
     732           0 :                 pg = pgs[i];
     733             : 
     734           0 :                 if (i == 0) {
     735           0 :                         swap = (pg->pg_flags & PQ_SWAPBACKED) != 0;
     736           0 :                         if (!swap) {
     737           0 :                                 uobj = pg->uobject;
     738           0 :                         }
     739             :                 }
     740           0 :                 KASSERT(swap || pg->uobject == uobj);
     741             : 
     742             :                 /*
     743             :                  * if this is a read and we got an error, mark the pages
     744             :                  * PG_RELEASED so that uvm_page_unbusy() will free them.
     745             :                  */
     746           0 :                 if (!write && error) {
     747           0 :                         atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
     748           0 :                         continue;
     749             :                 }
     750           0 :                 KASSERT(!write || (pgs[i]->pg_flags & PG_FAKE) == 0);
     751             : 
     752             :                 /*
     753             :                  * if this is a read and the page is PG_FAKE,
     754             :                  * or this was a successful write,
     755             :                  * mark the page PG_CLEAN and not PG_FAKE.
     756             :                  */
     757           0 :                 if ((pgs[i]->pg_flags & PG_FAKE) || (write && error != ENOMEM)) {
     758           0 :                         pmap_clear_reference(pgs[i]);
     759           0 :                         pmap_clear_modify(pgs[i]);
     760           0 :                         atomic_setbits_int(&pgs[i]->pg_flags, PG_CLEAN);
     761           0 :                         atomic_clearbits_int(&pgs[i]->pg_flags, PG_FAKE);
     762           0 :                 }
     763             :         }
     764           0 :         uvm_page_unbusy(pgs, npages);
     765             : 
     766             : #ifdef UVM_SWAP_ENCRYPT
     767             : freed:
     768             : #endif
     769           0 :         pool_put(&bufpool, bp);
     770           0 : }

Generated by: LCOV version 1.13