LCOV - code coverage report
Current view:  top level - uvm - uvm_amap.c (source / functions)
Test:          6.4
Date:          2018-10-19 03:25:38
Coverage:      Lines:     16 / 492   (3.3 %)
               Functions:  0 / 26    (0.0 %)
Legend:        Lines: hit | not hit

          Line data    Source code
       1             : /*      $OpenBSD: uvm_amap.c,v 1.79 2017/01/31 17:08:51 dhill Exp $     */
       2             : /*      $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $        */
       3             : 
       4             : /*
       5             :  * Copyright (c) 1997 Charles D. Cranor and Washington University.
       6             :  * All rights reserved.
       7             :  *
       8             :  * Redistribution and use in source and binary forms, with or without
       9             :  * modification, are permitted provided that the following conditions
      10             :  * are met:
      11             :  * 1. Redistributions of source code must retain the above copyright
      12             :  *    notice, this list of conditions and the following disclaimer.
      13             :  * 2. Redistributions in binary form must reproduce the above copyright
      14             :  *    notice, this list of conditions and the following disclaimer in the
      15             :  *    documentation and/or other materials provided with the distribution.
      16             :  *
      17             :  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
      18             :  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
      19             :  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
      20             :  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
      21             :  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      22             :  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
      23             :  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
      24             :  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      25             :  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
      26             :  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      27             :  */
      28             : 
      29             : /*
      30             :  * uvm_amap.c: amap operations
      31             :  *
      32             :  * this file contains functions that perform operations on amaps.  see
      33             :  * uvm_amap.h for a brief explanation of the role of amaps in uvm.
      34             :  */
      35             : 
      36             : #include <sys/param.h>
      37             : #include <sys/systm.h>
      38             : #include <sys/malloc.h>
      39             : #include <sys/kernel.h>
      40             : #include <sys/pool.h>
      41             : #include <sys/atomic.h>
      42             : 
      43             : #include <uvm/uvm.h>
      44             : #include <uvm/uvm_swap.h>
      45             : 
      46             : /*
      47             :  * pools for allocation of vm_amap structures.  note that in order to
      48             :  * avoid an endless loop, the amap pool's allocator cannot allocate
      49             :  * memory from an amap (it currently goes through the kernel uobj, so
      50             :  * we are ok).
      51             :  */
      52             : 
      53             : struct pool uvm_amap_pool;
      54             : struct pool uvm_small_amap_pool[UVM_AMAP_CHUNK];
      55             : struct pool uvm_amap_chunk_pool;
      56             : 
      57             : LIST_HEAD(, vm_amap) amap_list;
      58             : 
      59             : static char amap_small_pool_names[UVM_AMAP_CHUNK][9];
      60             : 
      61             : /*
      62             :  * local functions
      63             :  */
      64             : 
      65             : static struct vm_amap *amap_alloc1(int, int, int);
      66             : static __inline void amap_list_insert(struct vm_amap *);
      67             : static __inline void amap_list_remove(struct vm_amap *);   
      68             : 
      69             : struct vm_amap_chunk *amap_chunk_get(struct vm_amap *, int, int, int);
      70             : void amap_chunk_free(struct vm_amap *, struct vm_amap_chunk *);
      71             : void amap_wiperange_chunk(struct vm_amap *, struct vm_amap_chunk *, int, int);
      72             : 
      73             : static __inline void
      74           0 : amap_list_insert(struct vm_amap *amap)
      75             : {
      76           0 :         LIST_INSERT_HEAD(&amap_list, amap, am_list);
      77           0 : }
      78             : 
      79             : static __inline void
      80           0 : amap_list_remove(struct vm_amap *amap)
      81             : { 
      82           0 :         LIST_REMOVE(amap, am_list);
      83           0 : }
      84             : 
      85             : /*
      86             :  * amap_chunk_get: lookup a chunk for slot. if create is non-zero,
      87             :  * the chunk is created if it does not yet exist.
      88             :  *
      89             :  * => returns the chunk on success or NULL on error
      90             :  */
      91             : struct vm_amap_chunk *
      92           0 : amap_chunk_get(struct vm_amap *amap, int slot, int create, int waitf)
      93             : {
      94           0 :         int bucket = UVM_AMAP_BUCKET(amap, slot);
      95           0 :         int baseslot = AMAP_BASE_SLOT(slot);
      96             :         int n;
      97             :         struct vm_amap_chunk *chunk, *newchunk, *pchunk = NULL;
      98             : 
      99          60 :         if (UVM_AMAP_SMALL(amap))
     100           0 :                 return &amap->am_small;
     101             : 
     102           0 :         for (chunk = amap->am_buckets[bucket]; chunk != NULL;
     103           0 :             chunk = TAILQ_NEXT(chunk, ac_list)) {
     104           0 :                 if (UVM_AMAP_BUCKET(amap, chunk->ac_baseslot) != bucket)
     105             :                         break;
     106           0 :                 if (chunk->ac_baseslot == baseslot)
     107           0 :                         return chunk;
     108             :                 pchunk = chunk;
     109             :         }
     110           0 :         if (!create)
     111           0 :                 return NULL;
     112             : 
     113           0 :         if (amap->am_nslot - baseslot >= UVM_AMAP_CHUNK)
     114           0 :                 n = UVM_AMAP_CHUNK;
     115             :         else
     116             :                 n = amap->am_nslot - baseslot;
     117             : 
     118           0 :         newchunk = pool_get(&uvm_amap_chunk_pool, waitf | PR_ZERO);
     119           0 :         if (newchunk == NULL)
     120           0 :                 return NULL;
     121             : 
     122           0 :         if (pchunk == NULL) {
     123           0 :                 TAILQ_INSERT_TAIL(&amap->am_chunks, newchunk, ac_list);
     124           0 :                 KASSERT(amap->am_buckets[bucket] == NULL);
     125           0 :                 amap->am_buckets[bucket] = newchunk;
     126           0 :         } else
     127           0 :                 TAILQ_INSERT_AFTER(&amap->am_chunks, pchunk, newchunk,
     128             :                     ac_list);
     129             : 
     130           0 :         amap->am_ncused++;
     131           0 :         newchunk->ac_baseslot = baseslot;
     132           0 :         newchunk->ac_nslot = n;
     133           0 :         return newchunk;
     134           0 : }
     135             : 
     136             : void
     137           0 : amap_chunk_free(struct vm_amap *amap, struct vm_amap_chunk *chunk)
     138             : {
     139           0 :         int bucket = UVM_AMAP_BUCKET(amap, chunk->ac_baseslot);
     140             :         struct vm_amap_chunk *nchunk;
     141             : 
     142           0 :         if (UVM_AMAP_SMALL(amap))
     143           0 :                 return;
     144             : 
     145           0 :         nchunk = TAILQ_NEXT(chunk, ac_list);
     146           0 :         TAILQ_REMOVE(&amap->am_chunks, chunk, ac_list);
     147           0 :         if (amap->am_buckets[bucket] == chunk) {
     148           0 :                 if (nchunk != NULL &&
     149           0 :                     UVM_AMAP_BUCKET(amap, nchunk->ac_baseslot) == bucket)
     150           0 :                         amap->am_buckets[bucket] = nchunk;
     151             :                 else
     152           0 :                         amap->am_buckets[bucket] = NULL;
     153             : 
     154             :         }
     155           0 :         pool_put(&uvm_amap_chunk_pool, chunk);
     156           0 :         amap->am_ncused--;
     157           0 : }
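
A minimal sketch of the lookup-side calling pattern for amap_chunk_get(),
mirroring amap_lookup() near the end of this file; the wrapper name
example_lookup() is hypothetical and only illustrates the create == 0 case,
assuming the uvm headers included above:

/*
 * Sketch (not part of uvm_amap.c): translate a byte offset into a slot,
 * look the chunk up without creating it, then index the chunk's anon array.
 */
struct vm_anon *
example_lookup(struct vm_aref *aref, vaddr_t offset)
{
	struct vm_amap *amap = aref->ar_amap;
	struct vm_amap_chunk *chunk;
	int slot;

	AMAP_B2SLOT(slot, offset);	/* bytes -> slot index */
	slot += aref->ar_pageoff;	/* offset of this aref within the amap */

	chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
	if (chunk == NULL)		/* no chunk backs this slot yet */
		return NULL;
	return chunk->ac_anon[UVM_AMAP_SLOTIDX(slot)];
}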
     158             : 
     159             : #ifdef UVM_AMAP_PPREF
     160             : /*
     161             :  * what is ppref?   ppref is an _optional_ amap feature which is used
     162             :  * to keep track of reference counts on a per-page basis.  it is enabled
     163             :  * when UVM_AMAP_PPREF is defined.
     164             :  *
     165             :  * when enabled, an array of ints is allocated for the pprefs.  this
     166             :  * array is allocated only when a partial reference is added to the
     167             :  * map (either by unmapping part of the amap, or gaining a reference
     168             :  * to only a part of an amap).  if the malloc of the array fails
     169             :  * (M_NOWAIT), then we set the array pointer to PPREF_NONE to indicate
     170             :  * that we tried to do ppref's but couldn't alloc the array so just
     171             :  * give up (after all, this is an optional feature!).
     172             :  *
     173             :  * the array is divided into page sized "chunks."   for chunks of length 1,
     174             :  * the chunk reference count plus one is stored in that chunk's slot.
     175             :  * for chunks of length > 1 the first slot contains (the reference count
     176             :  * plus one) * -1.    [the negative value indicates that the length is
     177             :  * greater than one.]   the second slot of the chunk contains the length
     178             :  * of the chunk.   here is an example:
     179             :  *
     180             :  * actual REFS:  2  2  2  2  3  1  1  0  0  0  4  4  0  1  1  1
     181             :  *       ppref: -3  4  x  x  4 -2  2 -1  3  x -5  2  1 -2  3  x
     182             :  *              <----------><-><----><-------><----><-><------->
     183             :  * (x = don't care)
     184             :  *
     185             :  * this allows us to allow one int to contain the ref count for the whole
     186             :  * chunk.    note that the "plus one" part is needed because a reference
      187             :  * count of zero is neither positive nor negative (need a way to tell
     188             :  * if we've got one zero or a bunch of them).
     189             :  * 
     190             :  * here are some in-line functions to help us.
     191             :  */
     192             : 
     193             : static __inline void pp_getreflen(int *, int, int *, int *);
     194             : static __inline void pp_setreflen(int *, int, int, int);
     195             : 
     196             : /*
     197             :  * pp_getreflen: get the reference and length for a specific offset
     198             :  */
     199             : static __inline void
     200           0 : pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
     201             : {
     202             : 
     203           0 :         if (ppref[offset] > 0) {             /* chunk size must be 1 */
     204           0 :                 *refp = ppref[offset] - 1;      /* don't forget to adjust */
     205           0 :                 *lenp = 1;
     206           0 :         } else {
     207         117 :                 *refp = (ppref[offset] * -1) - 1;
     208           0 :                 *lenp = ppref[offset+1];
     209             :         }
     210           0 : }
     211             : 
     212             : /*
     213             :  * pp_setreflen: set the reference and length for a specific offset
     214             :  */
     215             : static __inline void
     216           0 : pp_setreflen(int *ppref, int offset, int ref, int len)
     217             : {
     218           0 :         if (len == 1) {
     219           0 :                 ppref[offset] = ref + 1;
     220           0 :         } else {
     221         115 :                 ppref[offset] = (ref + 1) * -1;
     222           0 :                 ppref[offset+1] = len;
     223             :         }
     224           0 : }
     225             : #endif
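
To make the ppref encoding above concrete, here is a minimal user-space
sketch, not part of uvm_amap.c, that reimplements the pp_setreflen() /
pp_getreflen() logic just shown (the demo_* names are invented) and
reproduces the worked example from the comment:

/*
 * Sketch (user space): the per-page reference run-length encoding.
 */
#include <stdio.h>

static void
demo_setreflen(int *ppref, int offset, int ref, int len)
{
	if (len == 1)
		ppref[offset] = ref + 1;
	else {
		ppref[offset] = (ref + 1) * -1;
		ppref[offset + 1] = len;
	}
}

static void
demo_getreflen(int *ppref, int offset, int *refp, int *lenp)
{
	if (ppref[offset] > 0) {		/* chunk of length 1 */
		*refp = ppref[offset] - 1;
		*lenp = 1;
	} else {
		*refp = (ppref[offset] * -1) - 1;
		*lenp = ppref[offset + 1];
	}
}

int
main(void)
{
	/* the "actual REFS" row from the comment above */
	int refs[16] = { 2, 2, 2, 2, 3, 1, 1, 0, 0, 0, 4, 4, 0, 1, 1, 1 };
	int ppref[16], off, ref, len;

	/* encode: one entry per run of equal reference counts */
	demo_setreflen(ppref, 0, 2, 4);		/* slots 0-3:   ref 2 */
	demo_setreflen(ppref, 4, 3, 1);		/* slot  4:     ref 3 */
	demo_setreflen(ppref, 5, 1, 2);		/* slots 5-6:   ref 1 */
	demo_setreflen(ppref, 7, 0, 3);		/* slots 7-9:   ref 0 */
	demo_setreflen(ppref, 10, 4, 2);	/* slots 10-11: ref 4 */
	demo_setreflen(ppref, 12, 0, 1);	/* slot  12:    ref 0 */
	demo_setreflen(ppref, 13, 1, 3);	/* slots 13-15: ref 1 */

	/* decode and check against the flat array; the encoded values match
	 * the "ppref" row drawn in the comment (-3 4 .. 4 -2 2 ..) */
	for (off = 0; off < 16; off += len) {
		demo_getreflen(ppref, off, &ref, &len);
		printf("slots %2d-%2d: ref %d (expect %d)\n",
		    off, off + len - 1, ref, refs[off]);
	}
	return 0;
}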
     226             : 
     227             : /*
     228             :  * amap_init: called at boot time to init global amap data structures
     229             :  */
     230             : 
     231             : void
     232           0 : amap_init(void)
     233             : {
     234             :         int i;
     235             :         size_t size;
     236             : 
     237             :         /* Initialize the vm_amap pool. */
     238           0 :         pool_init(&uvm_amap_pool, sizeof(struct vm_amap),
     239             :             0, IPL_NONE, PR_WAITOK, "amappl", NULL);
     240           0 :         pool_sethiwat(&uvm_amap_pool, 4096);
     241             : 
     242             :         /* initialize small amap pools */
     243           0 :         for (i = 0; i < nitems(uvm_small_amap_pool); i++) {
     244           0 :                 snprintf(amap_small_pool_names[i],
     245           0 :                     sizeof(amap_small_pool_names[0]), "amappl%d", i + 1);
     246           0 :                 size = offsetof(struct vm_amap, am_small.ac_anon) +
     247           0 :                     (i + 1) * sizeof(struct vm_anon *);
     248           0 :                 pool_init(&uvm_small_amap_pool[i], size, 0,
     249             :                     IPL_NONE, 0, amap_small_pool_names[i], NULL);
     250             :         }
     251             : 
     252           0 :         pool_init(&uvm_amap_chunk_pool, sizeof(struct vm_amap_chunk) +
     253             :             UVM_AMAP_CHUNK * sizeof(struct vm_anon *),
     254             :             0, IPL_NONE, 0, "amapchunkpl", NULL);
     255           0 :         pool_sethiwat(&uvm_amap_chunk_pool, 4096);
     256           0 : }
     257             : 
     258             : /*
     259             :  * amap_alloc1: internal function that allocates an amap, but does not
     260             :  *      init the overlay.
     261             :  */
     262             : static inline struct vm_amap *
     263           0 : amap_alloc1(int slots, int waitf, int lazyalloc)
     264             : {
     265             :         struct vm_amap *amap;
     266             :         struct vm_amap_chunk *chunk, *tmp;
     267             :         int chunks, log_chunks, chunkperbucket = 1, hashshift = 0;
     268             :         int buckets, i, n;
     269           0 :         int pwaitf = (waitf & M_WAITOK) ? PR_WAITOK : PR_NOWAIT;
     270             : 
     271           0 :         KASSERT(slots > 0);
     272             : 
     273             :         /*
     274             :          * Cast to unsigned so that rounding up cannot cause integer overflow
     275             :          * if slots is large.
     276             :          */
     277           0 :         chunks = roundup((unsigned int)slots, UVM_AMAP_CHUNK) / UVM_AMAP_CHUNK;
     278             : 
     279           0 :         if (lazyalloc) {
     280             :                 /*
     281             :                  * Basically, the amap is a hash map where the number of
     282             :                  * buckets is fixed. We select the number of buckets using the
     283             :                  * following strategy:
     284             :                  *
     285             :                  * 1. The maximal number of entries to search in a bucket upon
     286             :                  * a collision should be less than or equal to
     287             :                  * log2(slots / UVM_AMAP_CHUNK). This is the worst-case number
     288             :                  * of lookups we would have if we could chunk the amap. The
     289             :                  * log2(n) comes from the fact that amaps are chunked by
     290             :                  * splitting up their vm_map_entries and organizing those
     291             :                  * in a binary search tree.
     292             :                  *
     293             :                  * 2. The maximal number of entries in a bucket must be a
     294             :                  * power of two.
     295             :                  *
     296             :                  * The maximal number of entries per bucket is used to hash
     297             :                  * a slot to a bucket.
     298             :                  *
     299             :                  * In the future, this strategy could be refined to make it
     300             :                  * even harder/impossible that the total amount of KVA needed
     301             :                  * for the hash buckets of all amaps to exceed the maximal
     302             :                  * amount of KVA memory reserved for amaps.
     303             :                  */
     304           0 :                 for (log_chunks = 1; (chunks >> log_chunks) > 0; log_chunks++)
     305             :                         continue;
     306             : 
     307             :                 chunkperbucket = 1 << hashshift;
     308           0 :                 while (chunkperbucket + 1 < log_chunks) {
     309           0 :                         hashshift++;
     310           0 :                         chunkperbucket = 1 << hashshift;
     311             :                 }
     312             :         }
     313             : 
     314           0 :         if (slots > UVM_AMAP_CHUNK)
     315           0 :                 amap = pool_get(&uvm_amap_pool, pwaitf);
     316             :         else
     317           0 :                 amap = pool_get(&uvm_small_amap_pool[slots - 1],
     318           0 :                     pwaitf | PR_ZERO);
     319           0 :         if (amap == NULL)
     320           0 :                 return(NULL);
     321             : 
     322           0 :         amap->am_ref = 1;
     323           0 :         amap->am_flags = 0;
     324             : #ifdef UVM_AMAP_PPREF
     325           0 :         amap->am_ppref = NULL;
     326             : #endif
     327           0 :         amap->am_nslot = slots;
     328           0 :         amap->am_nused = 0;
     329             : 
     330           0 :         if (UVM_AMAP_SMALL(amap)) {
     331           0 :                 amap->am_small.ac_nslot = slots;
     332           0 :                 return (amap);
     333             :         }
     334             : 
     335           0 :         amap->am_ncused = 0;
     336           0 :         TAILQ_INIT(&amap->am_chunks);
     337           0 :         amap->am_hashshift = hashshift;
     338           0 :         amap->am_buckets = NULL;
     339             : 
     340           0 :         buckets = howmany(chunks, chunkperbucket);
     341           0 :         amap->am_buckets = mallocarray(buckets, sizeof(*amap->am_buckets),
     342           0 :             M_UVMAMAP, waitf | (lazyalloc ? M_ZERO : 0));
     343           0 :         if (amap->am_buckets == NULL)
     344             :                 goto fail1;
     345             : 
     346           0 :         if (!lazyalloc) {
     347           0 :                 for (i = 0; i < buckets; i++) {
     348           0 :                         if (i == buckets - 1) {
     349           0 :                                 n = slots % UVM_AMAP_CHUNK;
     350           0 :                                 if (n == 0)
     351             :                                         n = UVM_AMAP_CHUNK;
     352           0 :                         } else
     353             :                                 n = UVM_AMAP_CHUNK;
     354             : 
     355           0 :                         chunk = pool_get(&uvm_amap_chunk_pool,
     356           0 :                             PR_ZERO | pwaitf);
     357           0 :                         if (chunk == NULL)
     358             :                                 goto fail1;
     359             : 
     360           0 :                         amap->am_buckets[i] = chunk;
     361           0 :                         amap->am_ncused++;
     362           0 :                         chunk->ac_baseslot = i * UVM_AMAP_CHUNK;
     363           0 :                         chunk->ac_nslot = n;
     364           0 :                         TAILQ_INSERT_TAIL(&amap->am_chunks, chunk, ac_list);
     365             :                 }
     366             :         }
     367             : 
     368           0 :         return(amap);
     369             : 
     370             : fail1:
     371           0 :         free(amap->am_buckets, M_UVMAMAP, buckets * sizeof(*amap->am_buckets));
     372           0 :         TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
     373           0 :                 pool_put(&uvm_amap_chunk_pool, chunk);
     374           0 :         pool_put(&uvm_amap_pool, amap);
     375           0 :         return (NULL);
     376           0 : }
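
A minimal user-space sketch of the lazy-allocation sizing performed by
amap_alloc1() above; the demo_* names are invented and UVM_AMAP_CHUNK is
assumed to be 16 here purely for illustration:

/*
 * Sketch (user space, not part of uvm_amap.c): bucket-count selection for a
 * lazily allocated amap, using the same arithmetic as amap_alloc1().
 */
#include <stdio.h>

#define DEMO_AMAP_CHUNK	16	/* assumed value of UVM_AMAP_CHUNK */

static void
demo_buckets(unsigned int slots)
{
	unsigned int chunks, buckets;
	int log_chunks, chunkperbucket = 1, hashshift = 0;

	/* round slots up to whole chunks, as amap_alloc1() does */
	chunks = (slots + DEMO_AMAP_CHUNK - 1) / DEMO_AMAP_CHUNK;

	/* log_chunks ends up as floor(log2(chunks)) + 1 */
	for (log_chunks = 1; (chunks >> log_chunks) > 0; log_chunks++)
		continue;

	/* grow the power-of-two chunks-per-bucket figure until a full
	 * bucket scan stays within the log2-style bound */
	while (chunkperbucket + 1 < log_chunks) {
		hashshift++;
		chunkperbucket = 1 << hashshift;
	}

	buckets = (chunks + chunkperbucket - 1) / chunkperbucket;
	printf("slots %7u: chunks %6u, chunks/bucket %2d, buckets %6u\n",
	    slots, chunks, chunkperbucket, buckets);
}

int
main(void)
{
	demo_buckets(256);		/* small mapping */
	demo_buckets(4096);		/* ~16 MB of anon memory at 4 KB pages */
	demo_buckets(1U << 20);		/* ~4 GB */
	return 0;
}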
     377             : 
     378             : /*
     379             :  * amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
     380             :  *
     381             :  * => caller should ensure sz is a multiple of PAGE_SIZE
     382             :  * => reference count to new amap is set to one
     383             :  */
     384             : struct vm_amap *
     385           0 : amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
     386             : {
     387             :         struct vm_amap *amap;
     388             :         size_t slots;
     389             : 
     390           0 :         AMAP_B2SLOT(slots, sz);         /* load slots */
     391           0 :         if (slots > INT_MAX)
     392           0 :                 return (NULL);
     393             : 
     394           0 :         amap = amap_alloc1(slots, waitf, lazyalloc);
     395           0 :         if (amap)
     396           0 :                 amap_list_insert(amap);
     397             : 
     398           0 :         return(amap);
     399           0 : }
     400             : 
     401             : 
     402             : /*
     403             :  * amap_free: free an amap
     404             :  *
     405             :  * => the amap should have a zero reference count and be empty
     406             :  */
     407             : void
     408           0 : amap_free(struct vm_amap *amap)
     409             : {
     410             :         struct vm_amap_chunk *chunk, *tmp;
     411             : 
     412           0 :         KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
     413           0 :         KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
     414             : 
     415             : #ifdef UVM_AMAP_PPREF
     416           0 :         if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
     417           0 :                 free(amap->am_ppref, M_UVMAMAP, amap->am_nslot * sizeof(int));
     418             : #endif
     419             : 
     420           0 :         if (UVM_AMAP_SMALL(amap))
     421           0 :                 pool_put(&uvm_small_amap_pool[amap->am_nslot - 1], amap);
     422             :         else {
     423           0 :                 TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
     424           0 :                     pool_put(&uvm_amap_chunk_pool, chunk);
     425           0 :                 free(amap->am_buckets, M_UVMAMAP, 0);
     426           0 :                 pool_put(&uvm_amap_pool, amap);
     427             :         }
     428           0 : }
     429             : 
     430             : /*
     431             :  * amap_wipeout: wipeout all anon's in an amap; then free the amap!
     432             :  *
     433             :  * => called from amap_unref when the final reference to an amap is
     434             :  *      discarded (i.e. when reference count == 1)
     435             :  */
     436             : 
     437             : void
     438           0 : amap_wipeout(struct vm_amap *amap)
     439             : {
     440             :         int slot;
     441             :         struct vm_anon *anon;
     442             :         struct vm_amap_chunk *chunk;
     443             : 
     444           0 :         KASSERT(amap->am_ref == 0);
     445             : 
     446           0 :         if (__predict_false((amap->am_flags & AMAP_SWAPOFF) != 0)) {
     447             :                 /* amap_swap_off will call us again. */
     448           0 :                 return;
     449             :         }
     450           0 :         amap_list_remove(amap);
     451             : 
     452           0 :         AMAP_CHUNK_FOREACH(chunk, amap) {
     453           0 :                 int i, refs, map = chunk->ac_usedmap;
     454             : 
     455           0 :                 for (i = ffs(map); i != 0; i = ffs(map)) {
     456           0 :                         slot = i - 1;
     457           0 :                         map ^= 1 << slot;
     458           0 :                         anon = chunk->ac_anon[slot];
     459             : 
     460           0 :                         if (anon == NULL || anon->an_ref == 0)
     461           0 :                                 panic("amap_wipeout: corrupt amap");
     462             : 
     463           0 :                         refs = --anon->an_ref;
     464           0 :                         if (refs == 0) {
     465             :                                 /*
     466             :                                  * we had the last reference to a vm_anon.
     467             :                                  * free it.
     468             :                                  */
     469           0 :                                 uvm_anfree(anon);
     470           0 :                         }
     471             :                 }
     472             :         }
     473             : 
     474             :         /* now we free the map */
     475           0 :         amap->am_ref = 0;    /* ... was one */
     476           0 :         amap->am_nused = 0;
     477           0 :         amap_free(amap);        /* will free amap */
     478           0 : }
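
The loop above visits exactly the set bits of ac_usedmap; the same ffs()
idiom reappears in amap_cow_now(), amap_wiperange_chunk() and
amap_swap_off() below.  A minimal user-space sketch of the idiom, with an
invented example bitmap:

/*
 * Sketch (user space, not part of uvm_amap.c): walking the used-slot bitmap.
 */
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	int map = 0x0b2d;	/* example ac_usedmap: slots 0,2,3,5,8,9,11 */
	int i, slot;

	for (i = ffs(map); i != 0; i = ffs(map)) {
		slot = i - 1;		/* ffs() is 1-based */
		map ^= 1 << slot;	/* clear the slot we just handled */
		printf("visit slot %d\n", slot);
	}
	return 0;
}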
     479             : 
     480             : /*
     481             :  * amap_copy: ensure that a map entry's "needs_copy" flag is false
     482             :  *      by copying the amap if necessary.
     483             :  * 
     484             :  * => an entry with a null amap pointer will get a new (blank) one.
     485             :  * => if canchunk is true, then we may clip the entry into a chunk
     486             :  * => "startva" and "endva" are used only if canchunk is true.  they are
     487             :  *     used to limit chunking (e.g. if you have a large space that you
     488             :  *     know you are going to need to allocate amaps for, there is no point
     489             :  *     in allowing that to be chunked)
     490             :  */
     491             : 
     492             : void
     493           0 : amap_copy(struct vm_map *map, struct vm_map_entry *entry, int waitf,
     494             :     boolean_t canchunk, vaddr_t startva, vaddr_t endva)
     495             : {
     496             :         struct vm_amap *amap, *srcamap;
     497             :         int slots, lcv, lazyalloc = 0;
     498             :         vaddr_t chunksize;
     499             :         int i, j, k, n, srcslot;
     500             :         struct vm_amap_chunk *chunk = NULL, *srcchunk = NULL;
     501             : 
     502             :         /* is there a map to copy?   if not, create one from scratch. */
     503           0 :         if (entry->aref.ar_amap == NULL) {
     504             :                 /*
     505             :                  * check to see if we have a large amap that we can
     506             :                  * chunk.  we align startva/endva to chunk-sized
     507             :                  * boundaries and then clip to them.
     508             :                  *
     509             :                  * if we cannot chunk the amap, allocate it in a way
     510             :                  * that makes it grow or shrink dynamically with
     511             :                  * the number of slots.
     512             :                  */
     513           0 :                 if (atop(entry->end - entry->start) >= UVM_AMAP_LARGE) {
     514           0 :                         if (canchunk) {
     515             :                                 /* convert slots to bytes */
     516             :                                 chunksize = UVM_AMAP_CHUNK << PAGE_SHIFT;
     517           0 :                                 startva = (startva / chunksize) * chunksize;
     518           0 :                                 endva = roundup(endva, chunksize);
     519           0 :                                 UVM_MAP_CLIP_START(map, entry, startva);
     520             :                                 /* watch out for endva wrap-around! */
     521           0 :                                 if (endva >= startva)
     522           0 :                                         UVM_MAP_CLIP_END(map, entry, endva);
     523             :                         } else
     524             :                                 lazyalloc = 1;
     525             :                 }
     526             : 
     527           0 :                 entry->aref.ar_pageoff = 0;
     528           0 :                 entry->aref.ar_amap = amap_alloc(entry->end - entry->start,
     529             :                     waitf, lazyalloc);
     530           0 :                 if (entry->aref.ar_amap != NULL)
     531           0 :                         entry->etype &= ~UVM_ET_NEEDSCOPY;
     532           0 :                 return;
     533             :         }
     534             : 
     535             :         /*
     536             :          * first check and see if we are the only map entry
     537             :          * referencing the amap we currently have.  if so, then we can
     538             :          * just take it over rather than copying it.  the value can only
     539             :          * be one if we have the only reference to the amap
     540             :          */
     541           0 :         if (entry->aref.ar_amap->am_ref == 1) {
     542           0 :                 entry->etype &= ~UVM_ET_NEEDSCOPY;
     543           0 :                 return;
     544             :         }
     545             : 
     546             :         /* looks like we need to copy the map. */
     547           0 :         AMAP_B2SLOT(slots, entry->end - entry->start);
     548           0 :         if (!UVM_AMAP_SMALL(entry->aref.ar_amap) &&
     549           0 :             entry->aref.ar_amap->am_hashshift != 0)
     550           0 :                 lazyalloc = 1;
     551           0 :         amap = amap_alloc1(slots, waitf, lazyalloc);
     552           0 :         if (amap == NULL)
     553           0 :                 return;
     554           0 :         srcamap = entry->aref.ar_amap;
     555             : 
     556             :         /*
     557             :          * need to double check reference count now.  the reference count
     558             :          * could have changed while we were in malloc.  if the reference count
     559             :          * dropped down to one we take over the old map rather than
     560             :          * copying the amap.
     561             :          */
     562           0 :         if (srcamap->am_ref == 1) {          /* take it over? */
     563           0 :                 entry->etype &= ~UVM_ET_NEEDSCOPY;
     564           0 :                 amap->am_ref--;              /* drop final reference to map */
     565           0 :                 amap_free(amap);        /* dispose of new (unused) amap */
     566           0 :                 return;
     567             :         }
     568             : 
     569             :         /* we must copy it now. */
     570           0 :         for (lcv = 0; lcv < slots; lcv += n) {
     571           0 :                 srcslot = entry->aref.ar_pageoff + lcv;
     572           0 :                 i = UVM_AMAP_SLOTIDX(lcv);
     573           0 :                 j = UVM_AMAP_SLOTIDX(srcslot);
     574             :                 n = UVM_AMAP_CHUNK;
     575           0 :                 if (i > j)
     576           0 :                         n -= i;
     577             :                 else
     578           0 :                         n -= j;
     579           0 :                 if (lcv + n > slots)
     580           0 :                         n = slots - lcv;
     581             : 
     582           0 :                 srcchunk = amap_chunk_get(srcamap, srcslot, 0, PR_NOWAIT);
     583           0 :                 if (srcchunk == NULL)
     584             :                         continue;
     585             : 
     586           0 :                 chunk = amap_chunk_get(amap, lcv, 1, PR_NOWAIT);
     587           0 :                 if (chunk == NULL) {
     588           0 :                         amap->am_ref = 0;
     589           0 :                         amap_wipeout(amap);
     590           0 :                         return;
     591             :                 }
     592             : 
     593           0 :                 for (k = 0; k < n; i++, j++, k++) {
     594           0 :                         chunk->ac_anon[i] = srcchunk->ac_anon[j];
     595           0 :                         if (chunk->ac_anon[i] == NULL)
     596             :                                 continue;
     597             : 
     598           0 :                         chunk->ac_usedmap |= (1 << i);
     599           0 :                         chunk->ac_anon[i]->an_ref++;
     600           0 :                         amap->am_nused++;
     601           0 :                 }
     602             :         }
     603             : 
     604             :         /*
     605             :          * drop our reference to the old amap (srcamap).
     606             :          * we know that the reference count on srcamap is greater than
     607             :          * one (we checked above), so there is no way we could drop
     608             :          * the count to zero.  [and no need to worry about freeing it]
     609             :          */
     610           0 :         srcamap->am_ref--;
     611           0 :         if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
     612           0 :                 srcamap->am_flags &= ~AMAP_SHARED;   /* clear shared flag */
     613             : #ifdef UVM_AMAP_PPREF
     614           0 :         if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
     615           0 :                 amap_pp_adjref(srcamap, entry->aref.ar_pageoff, 
     616           0 :                     (entry->end - entry->start) >> PAGE_SHIFT, -1);
     617           0 :         }
     618             : #endif
     619             : 
     620             :         /* install new amap. */
     621           0 :         entry->aref.ar_pageoff = 0;
     622           0 :         entry->aref.ar_amap = amap;
     623           0 :         entry->etype &= ~UVM_ET_NEEDSCOPY;
     624             : 
     625           0 :         amap_list_insert(amap);
     626           0 : }
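
A small user-space sketch of the chunk-boundary alignment amap_copy()
applies before clipping a large, chunkable entry; UVM_AMAP_CHUNK == 16, a
4 KB page size and the example addresses are all assumptions made only for
illustration:

/*
 * Sketch (user space, not part of uvm_amap.c): round startva down and endva
 * up to chunk-sized boundaries, as amap_copy() does before UVM_MAP_CLIP_*.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long chunksize = 16UL << 12;	/* UVM_AMAP_CHUNK << PAGE_SHIFT */
	unsigned long startva = 0x12345000UL, endva = 0x1239a000UL;

	startva = (startva / chunksize) * chunksize;		/* round down */
	endva = ((endva + chunksize - 1) / chunksize) * chunksize; /* roundup() */

	printf("clip to start 0x%lx, end 0x%lx (chunk 0x%lx)\n",
	    startva, endva, chunksize);
	return 0;
}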
     627             : 
     628             : /*
     629             :  * amap_cow_now: resolve all copy-on-write faults in an amap now for fork(2)
     630             :  *
     631             :  *      called during fork(2) when the parent process has a wired map
     632             :  *      entry.   in that case we want to avoid write-protecting pages
     633             :  *      in the parent's map (e.g. like what you'd do for a COW page)
     634             :  *      so we resolve the COW here.
     635             :  *
     636             :  * => assume parent's entry was wired, thus all pages are resident.
     637             :  * => caller passes child's map/entry in to us
     638             :  * => XXXCDC: out of memory should cause fork to fail, but there is
     639             :  *      currently no easy way to do this (needs fix)
     640             :  */
     641             : 
     642             : void
     643           0 : amap_cow_now(struct vm_map *map, struct vm_map_entry *entry)
     644             : {
     645           0 :         struct vm_amap *amap = entry->aref.ar_amap;
     646             :         int slot;
     647             :         struct vm_anon *anon, *nanon;
     648             :         struct vm_page *pg, *npg;
     649           0 :         struct vm_amap_chunk *chunk;
     650             : 
     651             :         /*
     652             :          * note that if we wait, we must ReStart the "lcv" for loop because
     653             :          * some other process could reorder the anon's in the
     654             :          * am_anon[] array on us.
     655             :          */
     656             : ReStart:
     657           0 :         AMAP_CHUNK_FOREACH(chunk, amap) {
     658           0 :                 int i, map = chunk->ac_usedmap;
     659             : 
     660           0 :                 for (i = ffs(map); i != 0; i = ffs(map)) {
     661           0 :                         slot = i - 1;
     662           0 :                         map ^= 1 << slot;
     663           0 :                         anon = chunk->ac_anon[slot];
     664           0 :                         pg = anon->an_page;
     665             : 
     666             :                         /* page must be resident since parent is wired */
     667           0 :                         if (pg == NULL)
     668           0 :                                 panic("amap_cow_now: non-resident wired page"
     669             :                                     " in anon %p", anon);
     670             : 
     671             :                         /*
     672             :                          * if the anon ref count is one, we are safe (the child
     673             :                          * has exclusive access to the page).
     674             :                          */
     675           0 :                         if (anon->an_ref <= 1)
     676             :                                 continue;
     677             : 
     678             :                         /*
     679             :                          * if the page is busy then we have to wait for
     680             :                          * it and then restart.
     681             :                          */
     682           0 :                         if (pg->pg_flags & PG_BUSY) {
     683           0 :                                 atomic_setbits_int(&pg->pg_flags, PG_WANTED);
     684           0 :                                 UVM_WAIT(pg, FALSE, "cownow", 0);
     685           0 :                                 goto ReStart;
     686             :                         }
     687             : 
     688             :                         /* ok, time to do a copy-on-write to a new anon */
     689           0 :                         nanon = uvm_analloc();
     690           0 :                         if (nanon) {
     691           0 :                                 npg = uvm_pagealloc(NULL, 0, nanon, 0);
     692           0 :                         } else
     693             :                                 npg = NULL;     /* XXX: quiet gcc warning */
     694             : 
     695           0 :                         if (nanon == NULL || npg == NULL) {
     696             :                                 /* out of memory */
     697             :                                 /*
     698             :                                  * XXXCDC: we should cause fork to fail, but
     699             :                                  * we can't ...
     700             :                                  */
     701           0 :                                 if (nanon) {
     702           0 :                                         uvm_anfree(nanon);
     703           0 :                                 }
     704           0 :                                 uvm_wait("cownowpage");
     705           0 :                                 goto ReStart;
     706             :                         }
     707             : 
     708             :                         /*
     709             :                          * got it... now we can copy the data and replace anon
     710             :                          * with our new one...
     711             :                          */
     712           0 :                         uvm_pagecopy(pg, npg);          /* old -> new */
     713           0 :                         anon->an_ref--;                      /* can't drop to zero */
     714           0 :                         chunk->ac_anon[slot] = nanon;        /* replace */
     715             : 
     716             :                         /*
     717             :                          * drop PG_BUSY on new page ... since we have had its
     718             :                          * owner locked the whole time it can't be
     719             :                          * PG_RELEASED | PG_WANTED.
     720             :                          */
     721           0 :                         atomic_clearbits_int(&npg->pg_flags, PG_BUSY|PG_FAKE);
     722             :                         UVM_PAGE_OWN(npg, NULL);
     723           0 :                         uvm_lock_pageq();
     724           0 :                         uvm_pageactivate(npg);
     725           0 :                         uvm_unlock_pageq();
     726           0 :                 }
     727           0 :         }
     728           0 : }
     729             : 
     730             : /*
     731             :  * amap_splitref: split a single reference into two separate references
     732             :  *
     733             :  * => called from uvm_map's clip routines
     734             :  */
     735             : void
     736           0 : amap_splitref(struct vm_aref *origref, struct vm_aref *splitref, vaddr_t offset)
     737             : {
     738             :         int leftslots;
     739             : 
     740           0 :         AMAP_B2SLOT(leftslots, offset);
     741           0 :         if (leftslots == 0)
     742           0 :                 panic("amap_splitref: split at zero offset");
     743             : 
     744             :         /* now: we have a valid am_mapped array. */
     745           0 :         if (origref->ar_amap->am_nslot - origref->ar_pageoff - leftslots <= 0)
     746           0 :                 panic("amap_splitref: map size check failed");
     747             : 
     748             : #ifdef UVM_AMAP_PPREF
     749             :         /* establish ppref before we add a duplicate reference to the amap */
     750           0 :         if (origref->ar_amap->am_ppref == NULL)
     751           0 :                 amap_pp_establish(origref->ar_amap);
     752             : #endif
     753             : 
     754           0 :         splitref->ar_amap = origref->ar_amap;
     755           0 :         splitref->ar_amap->am_ref++;              /* not a share reference */
     756           0 :         splitref->ar_pageoff = origref->ar_pageoff + leftslots;
     757           0 : }
     758             : 
     759             : #ifdef UVM_AMAP_PPREF
     760             : 
     761             : /*
     762             :  * amap_pp_establish: add a ppref array to an amap, if possible
     763             :  */
     764             : void
     765           0 : amap_pp_establish(struct vm_amap *amap)
     766             : {
     767             : 
     768           0 :         amap->am_ppref = mallocarray(amap->am_nslot, sizeof(int),
     769             :             M_UVMAMAP, M_NOWAIT|M_ZERO);
     770             : 
     771             :         /* if we fail then we just won't use ppref for this amap */
     772           0 :         if (amap->am_ppref == NULL) {
     773           0 :                 amap->am_ppref = PPREF_NONE; /* not using it */
     774           0 :                 return;
     775             :         }
     776             : 
     777             :         /* init ppref */
     778           0 :         pp_setreflen(amap->am_ppref, 0, amap->am_ref, amap->am_nslot);
     779           0 : }
     780             : 
     781             : /*
     782             :  * amap_pp_adjref: adjust reference count to a part of an amap using the
     783             :  * per-page reference count array.
     784             :  *
     785             :  * => caller must check that ppref != PPREF_NONE before calling
     786             :  */
     787             : void
     788           0 : amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
     789         117 : {
     790             :         int stopslot, *ppref, lcv, prevlcv;
     791           0 :         int ref, len, prevref, prevlen;
     792             : 
     793           0 :         stopslot = curslot + slotlen;
     794           0 :         ppref = amap->am_ppref;
     795             :         prevlcv = 0;
     796             : 
     797             :         /*
     798             :          * first advance to the correct place in the ppref array,
     799             :          * fragment if needed.
     800             :          */
     801         117 :         for (lcv = 0 ; lcv < curslot ; lcv += len) {
     802           0 :                 pp_getreflen(ppref, lcv, &ref, &len);
     803           0 :                 if (lcv + len > curslot) {     /* goes past start? */
     804           0 :                         pp_setreflen(ppref, lcv, ref, curslot - lcv);
     805           0 :                         pp_setreflen(ppref, curslot, ref, len - (curslot -lcv));
     806           0 :                         len = curslot - lcv;   /* new length of entry @ lcv */
     807           0 :                 }
     808             :                 prevlcv = lcv;
     809             :         }
     810           0 :         if (lcv != 0)
     811           0 :                 pp_getreflen(ppref, prevlcv, &prevref, &prevlen);
     812             :         else {
     813             :                 /* Ensure that the "prevref == ref" test below always
     814             :                  * fails, since we're starting from the beginning of
     815             :                  * the ppref array; that is, there is no previous
     816             :                  * chunk.  
     817             :                  */
     818           0 :                 prevref = -1;
     819           0 :                 prevlen = 0;
     820             :         }
     821             : 
     822             :         /*
     823             :          * now adjust reference counts in range.  merge the first
     824             :          * changed entry with the last unchanged entry if possible.
     825             :          */
     826           0 :         if (lcv != curslot)
     827           0 :                 panic("amap_pp_adjref: overshot target");
     828             : 
     829         230 :         for (/* lcv already set */; lcv < stopslot ; lcv += len) {
     830           0 :                 pp_getreflen(ppref, lcv, &ref, &len);
     831           0 :                 if (lcv + len > stopslot) {     /* goes past end? */
     832           0 :                         pp_setreflen(ppref, lcv, ref, stopslot - lcv);
     833           0 :                         pp_setreflen(ppref, stopslot, ref,
     834           0 :                             len - (stopslot - lcv));
     835           0 :                         len = stopslot - lcv;
     836           0 :                 }
     837           0 :                 ref += adjval;
     838         116 :                 if (ref < 0)
     839           0 :                         panic("amap_pp_adjref: negative reference count");
     840           0 :                 if (lcv == prevlcv + prevlen && ref == prevref) {
     841           0 :                         pp_setreflen(ppref, prevlcv, ref, prevlen + len);
     842           0 :                 } else {
     843           0 :                         pp_setreflen(ppref, lcv, ref, len);
     844             :                 }
     845           0 :                 if (ref == 0)
     846           0 :                         amap_wiperange(amap, lcv, len);
     847             :         }
     848             : 
     849           0 : }
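
As a hedged walkthrough of the adjustment logic above, reusing the invented
demo_setreflen()/demo_getreflen() helpers from the sketch that follows
pp_setreflen(): amap_pp_adjref(amap, 2, 4, -1) on an 8-slot amap whose ppref
starts as a single "ref 2, len 8" entry fragments it into three entries.

/*
 * Walkthrough snippet (drop into the main() of the earlier user-space
 * sketch): the steps taken for curslot == 2, slotlen == 4, adjval == -1,
 * so stopslot == 6.
 */
int ppref[8];

demo_setreflen(ppref, 0, 2, 8);		/* initial state: slots 0-7, ref 2 */

/* advance phase: the entry at 0 straddles curslot, so fragment it */
demo_setreflen(ppref, 0, 2, 2);		/* slots 0-1 keep ref 2 */
demo_setreflen(ppref, 2, 2, 6);		/* slots 2-7 split off, still ref 2 */

/* adjust phase: the entry at 2 runs past stopslot, so split it again ... */
demo_setreflen(ppref, 2, 2, 4);		/* slots 2-5 */
demo_setreflen(ppref, 6, 2, 2);		/* slots 6-7 keep ref 2 */
/* ... then apply adjval to the clipped entry */
demo_setreflen(ppref, 2, 1, 4);		/* slots 2-5 drop to ref 1 */

/* final encoding: (ref 2, len 2) @0, (ref 1, len 4) @2, (ref 2, len 2) @6 */

Had the adjusted count reached zero instead, the code above would go on to
call amap_wiperange() for that range.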
     850             : 
     851             : void
     852           0 : amap_wiperange_chunk(struct vm_amap *amap, struct vm_amap_chunk *chunk,
     853             :     int slotoff, int slots)
     854             : {
     855             :         int curslot, i, map;
     856             :         int startbase, endbase;
     857             :         struct vm_anon *anon;
     858             : 
     859           0 :         startbase = AMAP_BASE_SLOT(slotoff);
     860           0 :         endbase = AMAP_BASE_SLOT(slotoff + slots - 1);
     861             : 
     862           0 :         map = chunk->ac_usedmap;
     863           0 :         if (startbase == chunk->ac_baseslot)
     864           0 :                 map &= ~((1 << (slotoff - startbase)) - 1);
     865           0 :         if (endbase == chunk->ac_baseslot)
     866           0 :                 map &= (1 << (slotoff + slots - endbase)) - 1;
     867             : 
     868           0 :         for (i = ffs(map); i != 0; i = ffs(map)) {
     869             :                 int refs;
     870             : 
     871           0 :                 curslot = i - 1;
     872           0 :                 map ^= 1 << curslot;
     873           0 :                 chunk->ac_usedmap ^= 1 << curslot;
     874           0 :                 anon = chunk->ac_anon[curslot];
     875             : 
     876             :                 /* remove it from the amap */
     877           0 :                 chunk->ac_anon[curslot] = NULL;
     878             : 
     879           0 :                 amap->am_nused--;
     880             : 
     881             :                 /* drop anon reference count */
     882           0 :                 refs = --anon->an_ref;
     883           0 :                 if (refs == 0) {
     884             :                         /*
     885             :                          * we just eliminated the last reference to an
     886             :                          * anon.  free it.
     887             :                          */
     888           0 :                         uvm_anfree(anon);
     889           0 :                 }
     890             :         }
     891           0 : }
     892             : 
     893             : /*
     894             :  * amap_wiperange: wipe out a range of an amap
     895             :  * [different from amap_wipeout because the amap is kept intact]
     896             :  */
     897             : void
     898           0 : amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
     899             : {
     900             :         int bucket, startbucket, endbucket;
     901             :         struct vm_amap_chunk *chunk, *nchunk;
     902             : 
     903           0 :         startbucket = UVM_AMAP_BUCKET(amap, slotoff);
     904           0 :         endbucket = UVM_AMAP_BUCKET(amap, slotoff + slots - 1);
     905             : 
     906             :         /*
     907             :          * we can either traverse the amap by am_chunks or by am_buckets
     908             :          * depending on which is cheaper.    decide now.
     909             :          */
     910           0 :         if (UVM_AMAP_SMALL(amap))
     911           0 :                 amap_wiperange_chunk(amap, &amap->am_small, slotoff, slots);
     912           0 :         else if (endbucket + 1 - startbucket >= amap->am_ncused) {
     913           0 :                 TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, nchunk) {
     914           0 :                         if (chunk->ac_baseslot + chunk->ac_nslot <= slotoff)
     915             :                                 continue;
     916           0 :                         if (chunk->ac_baseslot >= slotoff + slots)
     917             :                                 continue;
     918             : 
     919           0 :                         amap_wiperange_chunk(amap, chunk, slotoff, slots);
     920           0 :                         if (chunk->ac_usedmap == 0)
     921           0 :                                 amap_chunk_free(amap, chunk);
     922             :                 }
     923             :         } else {
     924           0 :                 for (bucket = startbucket; bucket <= endbucket; bucket++) {
     925           0 :                         for (chunk = amap->am_buckets[bucket]; chunk != NULL;
     926             :                             chunk = nchunk) {
     927           0 :                                 nchunk = TAILQ_NEXT(chunk, ac_list);
     928             : 
     929           0 :                                 if (UVM_AMAP_BUCKET(amap, chunk->ac_baseslot) !=
     930             :                                     bucket)
     931             :                                         break;
     932           0 :                                 if (chunk->ac_baseslot + chunk->ac_nslot <=
     933             :                                     slotoff)
     934             :                                         continue;
     935           0 :                                 if (chunk->ac_baseslot >= slotoff + slots)
     936             :                                         continue;
     937             : 
     938           0 :                                 amap_wiperange_chunk(amap, chunk, slotoff,
     939             :                                     slots);
     940           0 :                                 if (chunk->ac_usedmap == 0)
     941           0 :                                         amap_chunk_free(amap, chunk);
     942             :                         }
     943             :                 }
     944             :         }
     945           0 : }
     946             : 
     947             : #endif
     948             : 
     949             : /*
     950             :  * amap_swap_off: pagein anonymous pages in amaps and drop swap slots.
     951             :  *
     952             :  * => note that we don't always traverse all anons.
     953             :  *    eg. amaps being wiped out, released anons.
     954             :  * => return TRUE if failed.
     955             :  */
     956             : 
     957             : boolean_t
     958           0 : amap_swap_off(int startslot, int endslot)
     959             : {
     960             :         struct vm_amap *am;
     961             :         struct vm_amap *am_next;
     962             :         boolean_t rv = FALSE;
     963             : 
     964           0 :         for (am = LIST_FIRST(&amap_list); am != NULL && !rv; am = am_next) {
     965             :                 int i, map;
     966           0 :                 struct vm_amap_chunk *chunk;
     967             : 
     968             : again:
     969           0 :                 AMAP_CHUNK_FOREACH(chunk, am) {
     970           0 :                         map = chunk->ac_usedmap;
     971             : 
     972           0 :                         for (i = ffs(map); i != 0; i = ffs(map)) {
     973             :                                 int swslot;
     974           0 :                                 int slot = i - 1;
     975             :                                 struct vm_anon *anon;
     976             : 
     977           0 :                                 map ^= 1 << slot;
     978           0 :                                 anon = chunk->ac_anon[slot];
     979             : 
     980           0 :                                 swslot = anon->an_swslot;
     981           0 :                                 if (swslot < startslot || endslot <= swslot) {
     982           0 :                                         continue;
     983             :                                 }
     984             : 
     985           0 :                                 am->am_flags |= AMAP_SWAPOFF;
     986             : 
     987           0 :                                 rv = uvm_anon_pagein(anon);
     988             : 
     989           0 :                                 am->am_flags &= ~AMAP_SWAPOFF;
     990           0 :                                 if (rv || amap_refs(am) == 0)
     991           0 :                                         goto nextamap;
     992           0 :                                 goto again;
     993             :                         }
     994             :                 }
     995             : 
     996             : nextamap:
     997           0 :                 am_next = LIST_NEXT(am, am_list);
     998           0 :                 if (amap_refs(am) == 0)
     999           0 :                         amap_wipeout(am);
    1000             :         }
    1001             : 
    1002           0 :         return rv;
    1003             : }
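/*
 * Illustrative sketch, not part of uvm_amap.c: a swapoff-style caller hands
 * amap_swap_off() the slot range of the swap device being drained and treats
 * a TRUE return as failure.  The wrapper name and the errno choice are
 * hypothetical.
 */
static int
drain_amap_swap_range(int startslot, int endslot)
{
        /* TRUE means some anon in the range could not be paged in */
        if (amap_swap_off(startslot, endslot))
                return EBUSY;   /* caller would abort the swapoff */
        return 0;
}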
    1004             : 
    1005             : /*
    1006             :  * amap_lookup: look up a page in an amap
    1007             :  */
    1008             : struct vm_anon *
    1009           0 : amap_lookup(struct vm_aref *aref, vaddr_t offset)
    1010             : {
    1011             :         int slot;
    1012           0 :         struct vm_amap *amap = aref->ar_amap;
    1013             :         struct vm_amap_chunk *chunk;
    1014             : 
    1015           0 :         AMAP_B2SLOT(slot, offset);
    1016           0 :         slot += aref->ar_pageoff;
    1017             : 
    1018           0 :         if (slot >= amap->am_nslot)
    1019           0 :                 panic("amap_lookup: offset out of range");
    1020             : 
    1021           0 :         chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
    1022           0 :         if (chunk == NULL)
    1023           0 :                 return NULL;
    1024             : 
    1025           0 :         return chunk->ac_anon[UVM_AMAP_SLOTIDX(slot)];
    1026           0 : }
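/*
 * Illustrative sketch, not part of uvm_amap.c: a fault-handler style caller
 * resolves the anon backing a virtual address by passing the byte offset of
 * the page from the start of the map entry; amap_lookup() adds ar_pageoff
 * itself.  "entry" and "va" are hypothetical names here.
 */
static struct vm_anon *
anon_for_va(struct vm_map_entry *entry, vaddr_t va)
{
        /* NULL if no anon (or no chunk) exists for that page yet */
        return amap_lookup(&entry->aref, va - entry->start);
}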
    1027             : 
    1028             : /*
    1029             :  * amap_lookups: look up a range of pages in an amap
    1030             :  *
    1031             :  * => XXXCDC: this interface is biased toward array-based amaps.  fix.
    1032             :  */
    1033             : void
    1034           0 : amap_lookups(struct vm_aref *aref, vaddr_t offset,
    1035             :     struct vm_anon **anons, int npages)
    1036             : {
    1037             :         int i, lcv, n, slot;
    1038           0 :         struct vm_amap *amap = aref->ar_amap;
    1039             :         struct vm_amap_chunk *chunk = NULL;
    1040             : 
    1041          60 :         AMAP_B2SLOT(slot, offset);
    1042           0 :         slot += aref->ar_pageoff;
    1043             : 
    1044           0 :         if ((slot + (npages - 1)) >= amap->am_nslot)
    1045           0 :                 panic("amap_lookups: offset out of range");
    1046             : 
    1047           0 :         for (i = 0, lcv = slot; lcv < slot + npages; i += n, lcv += n) {
    1048           0 :                 n = UVM_AMAP_CHUNK - UVM_AMAP_SLOTIDX(lcv);
    1049           0 :                 if (lcv + n > slot + npages)
    1050           0 :                         n = slot + npages - lcv;
    1051             : 
    1052           0 :                 chunk = amap_chunk_get(amap, lcv, 0, PR_NOWAIT);
    1053           0 :                 if (chunk == NULL)
    1054           0 :                         memset(&anons[i], 0, n * sizeof(*anons));
    1055             :                 else
    1056           0 :                         memcpy(&anons[i],
    1057             :                             &chunk->ac_anon[UVM_AMAP_SLOTIDX(lcv)],
    1058             :                             n * sizeof(*anons));
    1059             :         }
    1060          60 : }
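/*
 * Illustrative sketch, not part of uvm_amap.c: amap_lookups() fills a
 * caller-supplied array with one anon pointer (or NULL) per page in the
 * range.  The wrapper below and its fixed page count are hypothetical.
 */
static void
lookup_four_pages(struct vm_aref *aref, vaddr_t offset,
    struct vm_anon **anons /* room for 4 entries */)
{
        /* anons[0..3] describe four consecutive pages starting at offset */
        amap_lookups(aref, offset, anons, 4);
}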
    1061             : 
    1062             : /*
    1063             :  * amap_populate: ensure that the amap can store an anon for the page at
    1064             :  * offset. This function can sleep until memory to store the anon is
    1065             :  * available.
    1066             :  */
    1067             : void
    1068           0 : amap_populate(struct vm_aref *aref, vaddr_t offset)
    1069             : {
    1070             :         int slot;
    1071           0 :         struct vm_amap *amap = aref->ar_amap;
    1072             :         struct vm_amap_chunk *chunk;
    1073             : 
    1074           0 :         AMAP_B2SLOT(slot, offset);
    1075           0 :         slot += aref->ar_pageoff;
    1076             : 
    1077           0 :         if (slot >= amap->am_nslot)
    1078           0 :                 panic("amap_populate: offset out of range");
    1079             : 
    1080           0 :         chunk = amap_chunk_get(amap, slot, 1, PR_WAITOK);
    1081           0 :         KASSERT(chunk != NULL);
    1082           0 : }
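/*
 * Illustrative sketch, not part of uvm_amap.c: because amap_add() below only
 * attempts a PR_NOWAIT chunk allocation, a caller that must not fail can
 * reserve the chunk first with amap_populate(), which is allowed to sleep.
 * The wrapper name is hypothetical.
 */
static void
add_anon_noretry(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon)
{
        amap_populate(aref, offset);    /* may sleep; guarantees the chunk */
        if (amap_add(aref, offset, anon, FALSE))
                panic("add_anon_noretry: amap_add failed after populate");
}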
    1083             : 
    1084             : /*
    1085             :  * amap_add: add (or replace) a page to an amap
    1086             :  *
    1087             :  * => returns 0 if the page was added successfully, or 1 if not.
    1088             :  */
    1089             : int
    1090           0 : amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
    1091             :     boolean_t replace)
    1092             : {
    1093             :         int slot;
    1094           0 :         struct vm_amap *amap = aref->ar_amap;
    1095             :         struct vm_amap_chunk *chunk;
    1096             : 
    1097           0 :         AMAP_B2SLOT(slot, offset);
    1098           0 :         slot += aref->ar_pageoff;
    1099             : 
    1100           0 :         if (slot >= amap->am_nslot)
    1101           0 :                 panic("amap_add: offset out of range");
    1102           0 :         chunk = amap_chunk_get(amap, slot, 1, PR_NOWAIT);
    1103           0 :         if (chunk == NULL)
    1104           0 :                 return 1;
    1105             : 
    1106           0 :         slot = UVM_AMAP_SLOTIDX(slot);
    1107           0 :         if (replace) {
    1108           0 :                 if (chunk->ac_anon[slot] == NULL)
    1109           0 :                         panic("amap_add: replacing null anon");
    1110           0 :                 if (chunk->ac_anon[slot]->an_page != NULL &&
    1111           0 :                     (amap->am_flags & AMAP_SHARED) != 0) {
    1112           0 :                         pmap_page_protect(chunk->ac_anon[slot]->an_page,
    1113             :                             PROT_NONE);
    1114             :                         /*
    1115             :                          * XXX: what if the page is supposed to be wired somewhere?
    1116             :                          */
    1117           0 :                 }
    1118             :         } else {   /* !replace */
    1119           0 :                 if (chunk->ac_anon[slot] != NULL)
    1120           0 :                         panic("amap_add: slot in use");
    1121             : 
    1122           0 :                 chunk->ac_usedmap |= 1 << slot;
    1123           0 :                 amap->am_nused++;
    1124             :         }
    1125           0 :         chunk->ac_anon[slot] = anon;
    1126             : 
    1127           0 :         return 0;
    1128           0 : }
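/*
 * Illustrative sketch, not part of uvm_amap.c: with replace == TRUE the
 * target slot must already hold an anon; ac_usedmap and am_nused are left
 * untouched and only the anon pointer is swapped.  The wrapper name is
 * hypothetical.
 */
static int
swap_in_new_anon(struct vm_aref *aref, vaddr_t offset, struct vm_anon *newanon)
{
        /* nonzero only if the backing chunk could not be obtained */
        return amap_add(aref, offset, newanon, TRUE);
}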
    1129             : 
    1130             : /*
    1131             :  * amap_unadd: remove a page from an amap
    1132             :  */
    1133             : void
    1134           0 : amap_unadd(struct vm_aref *aref, vaddr_t offset)
    1135             : {
    1136             :         int slot;
    1137           0 :         struct vm_amap *amap = aref->ar_amap;
    1138             :         struct vm_amap_chunk *chunk;
    1139             : 
    1140           0 :         AMAP_B2SLOT(slot, offset);
    1141           0 :         slot += aref->ar_pageoff;
    1142             : 
    1143           0 :         if (slot >= amap->am_nslot)
    1144           0 :                 panic("amap_unadd: offset out of range");
    1145           0 :         chunk = amap_chunk_get(amap, slot, 0, PR_NOWAIT);
    1146           0 :         if (chunk == NULL)
    1147           0 :                 panic("amap_unadd: chunk for slot %d not present", slot);
    1148             : 
    1149           0 :         slot = UVM_AMAP_SLOTIDX(slot);
    1150           0 :         if (chunk->ac_anon[slot] == NULL)
    1151           0 :                 panic("amap_unadd: nothing there");
    1152             : 
    1153           0 :         chunk->ac_anon[slot] = NULL;
    1154           0 :         chunk->ac_usedmap &= ~(1 << slot);
    1155           0 :         amap->am_nused--;
    1156             : 
    1157           0 :         if (chunk->ac_usedmap == 0)
    1158           0 :                 amap_chunk_free(amap, chunk);
    1159           0 : }
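/*
 * Illustrative sketch, not part of uvm_amap.c: removing a page from an amap
 * is usually paired with a lookup, since amap_unadd() panics when the slot
 * is empty.  The wrapper name is hypothetical.
 */
static struct vm_anon *
detach_anon(struct vm_aref *aref, vaddr_t offset)
{
        struct vm_anon *anon;

        anon = amap_lookup(aref, offset);
        if (anon != NULL)
                amap_unadd(aref, offset);  /* frees the chunk if now empty */
        return anon;
}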
    1160             : 
    1161             : /*
    1162             :  * amap_ref: gain a reference to an amap
    1163             :  *
    1164             :  * => "offset" and "len" are in units of pages
    1165             :  * => called at fork time to gain the child's reference
    1166             :  */
    1167             : void
    1168           0 : amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
    1169             : {
    1170             : 
    1171          60 :         amap->am_ref++;
    1172           0 :         if (flags & AMAP_SHARED)
    1173          60 :                 amap->am_flags |= AMAP_SHARED;
    1174             : #ifdef UVM_AMAP_PPREF
    1175          60 :         if (amap->am_ppref == NULL && (flags & AMAP_REFALL) == 0 &&
    1176           0 :             len != amap->am_nslot)
    1177           0 :                 amap_pp_establish(amap);
    1178           0 :         if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
    1179           0 :                 if (flags & AMAP_REFALL)
    1180          60 :                         amap_pp_adjref(amap, 0, amap->am_nslot, 1);
    1181             :                 else
    1182           0 :                         amap_pp_adjref(amap, offset, len, 1);
    1183             :         }
    1184             : #endif
    1185           0 : }
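/*
 * Illustrative sketch, not part of uvm_amap.c: at fork time a shared mapping
 * gains a reference on the parent's amap instead of copying it.  "entry"
 * stands for a hypothetical child map entry; the length is the entry size in
 * pages, matching the "units of pages" rule noted above.
 */
static void
share_amap_with_child(struct vm_map_entry *entry)
{
        struct vm_aref *aref = &entry->aref;

        if (aref->ar_amap != NULL)
                amap_ref(aref->ar_amap, aref->ar_pageoff,
                    atop(entry->end - entry->start), AMAP_SHARED);
}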
    1186             : 
    1187             : /*
    1188             :  * amap_unref: remove a reference to an amap
    1189             :  *
    1190             :  * => caller must remove all pmap-level references to this amap before
    1191             :  *      dropping the reference
    1192             :  * => called from uvm_unmap_detach [only]  ... note that entry is no
    1193             :  *      longer part of a map
    1194             :  */
    1195             : void
    1196           0 : amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, boolean_t all)
    1197             : {
    1198             : 
    1199             :         /* if we are the last reference, free the amap and return. */
    1200          58 :         if (amap->am_ref-- == 1) {
    1201           0 :                 amap_wipeout(amap);     /* drops final ref and frees */
    1202           0 :                 return;
    1203             :         }
    1204             : 
    1205             :         /* otherwise just drop the reference count(s) */
    1206           0 :         if (amap->am_ref == 1 && (amap->am_flags & AMAP_SHARED) != 0)
    1207           0 :                 amap->am_flags &= ~AMAP_SHARED;  /* clear shared flag */
    1208             : #ifdef UVM_AMAP_PPREF
    1209         114 :         if (amap->am_ppref == NULL && all == 0 && len != amap->am_nslot)
    1210           0 :                 amap_pp_establish(amap);
    1211           0 :         if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
    1212           0 :                 if (all)
    1213          57 :                         amap_pp_adjref(amap, 0, amap->am_nslot, -1);
    1214             :                 else
    1215           0 :                         amap_pp_adjref(amap, offset, len, -1);
    1216             :         }
    1217             : #endif
    1218           0 : }
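/*
 * Illustrative sketch, not part of uvm_amap.c: when a map entry is torn down
 * its amap reference is dropped; if that was the last reference, amap_unref()
 * wipes the amap out itself.  "entry" is hypothetical and the TRUE argument
 * applies the drop to the whole amap.
 */
static void
drop_entry_amap(struct vm_map_entry *entry)
{
        struct vm_aref *aref = &entry->aref;

        if (aref->ar_amap != NULL)
                amap_unref(aref->ar_amap, aref->ar_pageoff,
                    atop(entry->end - entry->start), TRUE);
}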

Generated by: LCOV version 1.13