LCOV - code coverage report

Current view: top level - uvm - uvm_device.c (source / functions)
Test:        6.4
Date:        2018-10-19 03:25:38
Coverage:    Lines: 0 of 97 hit (0.0 %)    Functions: 0 of 5 hit (0.0 %)

/*	$OpenBSD: uvm_device.c,v 1.55 2018/08/20 10:00:04 kettenis Exp $	*/
/*	$NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

#if defined(__amd64__) || defined(__arm64__) || \
    defined(__i386__) || defined(__loongson__) || \
    defined(__macppc__) || defined(__sparc64__)
#include "drm.h"
#endif
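
/*
 * Note on the include above (added commentary, assumed from the usual
 * kernel build convention): "drm.h" is a header generated by config(8)
 * that defines NDRM, the number of drm(4) devices configured into the
 * kernel, so the udv_attach_drm() path below only exists on kernels
 * built with DRM support.
 */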

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(, uvm_device) udv_list = LIST_HEAD_INITIALIZER(udv_list);
struct mutex udv_lock = MUTEX_INITIALIZER(IPL_NONE);

/*
 * functions
 */
static void             udv_reference(struct uvm_object *);
static void             udv_detach(struct uvm_object *);
static int              udv_fault(struct uvm_faultinfo *, vaddr_t,
                                       vm_page_t *, int, int, vm_fault_t,
                                       vm_prot_t, int);
static boolean_t        udv_flush(struct uvm_object *, voff_t, voff_t,
                                       int);

/*
 * master pager structure
 */
struct uvm_pagerops uvm_deviceops = {
        NULL,           /* inited statically */
        udv_reference,
        udv_detach,
        udv_fault,
        udv_flush,
};
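
/*
 * For readability, the same table written with designated initializers.
 * This is a sketch only: the pgo_* field names are assumed from
 * <uvm/uvm_pager.h> and may differ between releases.
 */
#if 0
struct uvm_pagerops uvm_deviceops = {
        .pgo_init       = NULL,                 /* inited statically */
        .pgo_reference  = udv_reference,        /* add a reference */
        .pgo_detach     = udv_detach,           /* drop a reference */
        .pgo_fault      = udv_fault,            /* device fault handler */
        .pgo_flush      = udv_flush,            /* flush (a no-op here) */
};
#endif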

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.   allocate a new
 * one if needed.
 *
 * => nothing should be locked so that we can sleep here.
 *
 * The last two arguments (off and size) are only used for access checking.
 */
struct uvm_object *
udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
        struct uvm_device *udv, *lcv;
        paddr_t (*mapfn)(dev_t, off_t, int);
#if NDRM > 0
        struct uvm_object *obj;
#endif

        /* before we do anything, ensure this device supports mmap */
        mapfn = cdevsw[major(device)].d_mmap;
        if (mapfn == NULL ||
            mapfn == (paddr_t (*)(dev_t, off_t, int)) enodev ||
            mapfn == (paddr_t (*)(dev_t, off_t, int)) nullop)
                return(NULL);

        /* Negative offsets on the object are not allowed. */
        if (off < 0)
                return(NULL);

#if NDRM > 0
        obj = udv_attach_drm(device, accessprot, off, size);
        if (obj)
                return(obj);
#endif

        /*
         * Check that the specified range of the device allows the
         * desired protection.
         *
         * XXX clobbers off and size, but nothing else here needs them.
         */
        while (size != 0) {
                if ((*mapfn)(device, off, accessprot) == -1)
                        return (NULL);
                off += PAGE_SIZE; size -= PAGE_SIZE;
        }

        /* keep looping until we get it */
        for (;;) {
                /* first, attempt to find it on the main list */
                mtx_enter(&udv_lock);
                LIST_FOREACH(lcv, &udv_list, u_list) {
                        if (device == lcv->u_device)
                                break;
                }

                /* got it on main list.  put a hold on it and unlock udv_lock. */
                if (lcv) {
     139           0 :                 if (lcv) {
     140             :                         /*
     141             :                          * if someone else has a hold on it, sleep and start
     142             :                          * over again. Else, we need take HOLD flag so we
     143             :                          * don't have to re-order locking here.
     144             :                          */
     145           0 :                         if (lcv->u_flags & UVM_DEVICE_HOLD) {
     146           0 :                                 lcv->u_flags |= UVM_DEVICE_WANTED;
     147           0 :                                 msleep(lcv, &udv_lock, PVM | PNORELOCK,
     148             :                                     "udv_attach", 0);
     149           0 :                                 continue;
     150             :                         }
     151             : 
     152             :                         /* we are now holding it */
     153           0 :                         lcv->u_flags |= UVM_DEVICE_HOLD;
     154           0 :                         mtx_leave(&udv_lock);
     155             : 
     156             :                         /* bump reference count, unhold, return. */
     157           0 :                         lcv->u_obj.uo_refs++;
     158             : 
     159           0 :                         mtx_enter(&udv_lock);
     160           0 :                         if (lcv->u_flags & UVM_DEVICE_WANTED)
     161           0 :                                 wakeup(lcv);
     162           0 :                         lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
     163           0 :                         mtx_leave(&udv_lock);
     164           0 :                         return(&lcv->u_obj);
     165             :                 }
     166             : 
     167             :                 /* did not find it on main list.   need to malloc a new one. */
     168           0 :                 mtx_leave(&udv_lock);
     169             :                 /* NOTE: we could sleep in the following malloc() */
     170           0 :                 udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
     171           0 :                 mtx_enter(&udv_lock);
     172             : 
     173             :                 /*
     174             :                  * now we have to double check to make sure no one added it
     175             :                  * to the list while we were sleeping...
     176             :                  */
     177           0 :                 LIST_FOREACH(lcv, &udv_list, u_list) {
     178           0 :                         if (device == lcv->u_device)
     179             :                                 break;
     180             :                 }
     181             : 
     182             :                 /*
     183             :                  * did we lose a race to someone else?
     184             :                  * free our memory and retry.
     185             :                  */
     186           0 :                 if (lcv) {
     187           0 :                         mtx_leave(&udv_lock);
     188           0 :                         free(udv, M_TEMP, sizeof(*udv));
     189           0 :                         continue;
     190             :                 }
     191             : 
     192             :                 /*
     193             :                  * we have it!   init the data structures, add to list
     194             :                  * and return.
     195             :                  */
     196           0 :                 uvm_objinit(&udv->u_obj, &uvm_deviceops, 1);
     197           0 :                 udv->u_flags = 0;
     198           0 :                 udv->u_device = device;
     199           0 :                 LIST_INSERT_HEAD(&udv_list, udv, u_list);
     200           0 :                 mtx_leave(&udv_lock);
     201           0 :                 return(&udv->u_obj);
     202             :         }
     203             :         /*NOTREACHED*/
     204           0 : }
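
/*
 * Illustrative sketch (not part of this file): the cdevsw d_mmap hook
 * that udv_attach() probes above and udv_fault() calls below.  An
 * mmap-capable character device returns the physical address backing a
 * given byte offset, or -1 if the offset or protection is unacceptable.
 * "mydev_base_pa" and "MYDEV_APERTURE_SIZE" are hypothetical names.
 */
#if 0
paddr_t
mydevmmap(dev_t dev, off_t off, int prot)
{
        /* refuse anything outside the device's aperture */
        if (off < 0 || off >= MYDEV_APERTURE_SIZE)
                return (-1);

        /* translate the byte offset into a physical address */
        return (mydev_base_pa + off);
}
#endif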

/*
 * udv_reference
 *
 * add a reference to a VM object.   Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 */
static void
udv_reference(struct uvm_object *uobj)
{

        uobj->uo_refs++;
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 */
static void
udv_detach(struct uvm_object *uobj)
{
        struct uvm_device *udv = (struct uvm_device *)uobj;

        /* loop until done */
again:
        if (uobj->uo_refs > 1) {
                uobj->uo_refs--;
                return;
        }
        KASSERT(uobj->uo_npages == 0 && RBT_EMPTY(uvm_objtree, &uobj->memt));

        /* is it being held?   if so, wait until others are done. */
        mtx_enter(&udv_lock);
        if (udv->u_flags & UVM_DEVICE_HOLD) {
                udv->u_flags |= UVM_DEVICE_WANTED;
                /*
                 * lock interleaving. -- this is ok in this case since the
                 * locks are both IPL_NONE
                 */
                msleep(udv, &udv_lock, PVM | PNORELOCK, "udv_detach", 0);
                goto again;
        }

        /* got it!   nuke it now. */
        LIST_REMOVE(udv, u_list);
        if (udv->u_flags & UVM_DEVICE_WANTED)
                wakeup(udv);
        mtx_leave(&udv_lock);
        free(udv, M_TEMP, sizeof(*udv));
}
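
/*
 * The UVM_DEVICE_HOLD flag is the handshake between udv_attach() and
 * udv_detach(): attach takes HOLD so it can safely drop udv_lock while
 * it bumps the reference count (and itself sleeps if someone else
 * holds it), while detach sets UVM_DEVICE_WANTED and sleeps until the
 * holder is done before freeing the udv.
 */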


/*
 * udv_flush
 *
 * flush pages out of a uvm object.   a no-op for devices.
 */
static boolean_t
udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{

        return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine:
 *      since we don't return vm_pages, we need full control over the
 *      pmap_enter() map-in
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *      it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */
static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps, int npages,
    int centeridx, vm_fault_t fault_type, vm_prot_t access_type, int flags)
{
        struct vm_map_entry *entry = ufi->entry;
        struct uvm_object *uobj = entry->object.uvm_obj;
        struct uvm_device *udv = (struct uvm_device *)uobj;
        vaddr_t curr_va;
        off_t curr_offset;
        paddr_t paddr;
        int lcv, retval;
        dev_t device;
        paddr_t (*mapfn)(dev_t, off_t, int);
        vm_prot_t mapprot;

        /*
         * we do not allow device mappings to be mapped copy-on-write
         * so we kill any attempt to do so here.
         */
        if (UVM_ET_ISCOPYONWRITE(entry)) {
                uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
                return(VM_PAGER_ERROR);
        }

        /* get device map function. */
        device = udv->u_device;
        mapfn = cdevsw[major(device)].d_mmap;

        /*
         * now we must determine the offset in udv to use and the VA to
         * use for pmap_enter.  note that we always use orig_map's pmap
         * for pmap_enter (even if we have a submap).   since virtual
         * addresses in a submap must match the main map, this is ok.
         */
        /* udv offset = (offset from start of entry) + entry's offset */
        curr_offset = entry->offset + (vaddr - entry->start);
        /* pmap va = vaddr (virtual address of pps[0]) */
        curr_va = vaddr;
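
        /*
         * Worked example with made-up numbers: if entry->start is
         * 0x10000000, entry->offset is 0x2000 and vaddr is 0x10003000,
         * then curr_offset = 0x2000 + (0x10003000 - 0x10000000) =
         * 0x5000, i.e. the sixth page of the device gets handed to the
         * driver's d_mmap below.
         */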

        /* loop over the page range entering in as needed */
        retval = VM_PAGER_OK;
        for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
            curr_va += PAGE_SIZE) {
                if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
                        continue;

                if (pps[lcv] == PGO_DONTCARE)
                        continue;

                paddr = (*mapfn)(device, curr_offset, access_type);
                if (paddr == -1) {
                        retval = VM_PAGER_ERROR;
                        break;
                }
                mapprot = ufi->entry->protection;
                if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
                    mapprot, PMAP_CANFAIL | mapprot) != 0) {
                        /*
                         * pmap_enter() didn't have the resource to
                         * enter this mapping.  Unlock everything,
                         * wait for the pagedaemon to free up some
                         * pages, and then tell uvm_fault() to start
                         * the fault again.
                         *
                         * XXX Needs some rethinking for the PGO_ALLPAGES
                         * XXX case.
                         */
                        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
                            uobj, NULL);

                        /* sync what we have so far */
                        pmap_update(ufi->orig_map->pmap);
                        uvm_wait("udv_fault");
                        return (VM_PAGER_REFAULT);
                }
        }

        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
        pmap_update(ufi->orig_map->pmap);
        return (retval);
}
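
/*
 * udv_fault() thus has three outcomes: VM_PAGER_OK when every requested
 * page was entered (or skipped), VM_PAGER_ERROR for copy-on-write
 * mappings or offsets the driver's d_mmap rejects, and VM_PAGER_REFAULT
 * when pmap_enter() ran out of resources and the fault must be retried
 * after uvm_wait().
 */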

Generated by: LCOV version 1.13