/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_vma_manager.h>

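/*
 * Number of pages mapped per fault: the faulting page plus a speculative
 * prefault of the pages that follow it, to reduce the fault rate.
 */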
#define TTM_BO_VM_NUM_PREFAULT 16

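/*
 * Wait for a buffer object that is in the middle of a pipelined move to
 * become idle.  Returns 0 on success, or VM_PAGER_ERROR/VM_PAGER_REFAULT
 * for the caller to propagate out of the fault handler.
 */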
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo)
{
        int ret = 0;

        if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        ret = ttm_bo_wait(bo, false, false, true);
        if (likely(ret == 0))
                goto out_unlock;

        /*
         * If possible, avoid waiting for the GPU with mmap_sem
         * held.
         */
#ifdef notyet
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                ttm_bo_reference(bo);
                up_read(&vma->vm_mm->mmap_sem);
                (void) ttm_bo_wait(bo, false, true, false);
                ttm_bo_unreserve(bo);
                ttm_bo_unref(&bo);
                goto out_unlock;
        }
#endif

        /*
         * Ordinary wait.
         */
        ret = ttm_bo_wait(bo, false, true, false);
        if (unlikely(ret != 0))
                ret = (ret != -ERESTARTSYS) ? VM_PAGER_ERROR :
                        VM_PAGER_REFAULT;

out_unlock:
        return ret;
}

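/*
 * The fault handler comes in two flavours: the original Linux version,
 * kept for reference but compiled out in this tree, and the OpenBSD/UVM
 * port below it that does the actual work.
 */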
#ifdef __linux__
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct vm_page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
        struct vm_area_struct cvma;

        /*
         * Work around the locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        ret = ttm_bo_reserve(bo, true, true, false, NULL);
        if (unlikely(ret != 0)) {
                if (ret != -EBUSY)
                        return VM_FAULT_NOPAGE;

                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_reference(bo);
                                up_read(&vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                                ttm_bo_unref(&bo);
                        }

                        return VM_FAULT_RETRY;
                }

                /*
                 * If we wanted to change the locking order to
                 * mmap_sem -> bo::reserve, we would use a blocking
                 * reserve here instead of retrying the fault...
                 */
                return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo);
        if (unlikely(ret != 0)) {
                retval = ret;

                if (retval == VM_FAULT_RETRY &&
                    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                        /* The BO has already been unreserved. */
                        return retval;
                }

                goto out_unlock;
        }

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->vma_node);

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Make a local vma copy to modify the page_prot member
         * and vm_flags if necessary. The vma parameter is protected
         * by mmap_sem in write mode.
         */
        cvma = *vma;
        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

        if (bo->mem.bus.is_iomem) {
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);

                /* Allocate all pages at once, the most common usage */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * the first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        page->mapping = vma->vm_file->f_mapping;
                        page->index = drm_vma_node_start(&bo->vma_node) +
                                page_offset;
                        pfn = page_to_pfn(page);
                }

                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vm_insert_mixed(&cvma, address, pfn);
                else
                        ret = vm_insert_pfn(&cvma, address, pfn);

                /*
                 * Somebody beat us to this PTE, we prefaulted into an
                 * already populated PTE, or a prefaulting error occurred.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_io_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}
#else
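/*
 * UVM fault handler: resolve the faulting offset within the buffer
 * object and enter mappings for up to TTM_BO_VM_NUM_PREFAULT pages.
 * Returns a VM_PAGER_* code to the UVM fault machinery.
 */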
int
ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
        struct uvm_object *uobj = ufi->entry->object.uvm_obj;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        struct ttm_tt *ttm = NULL;
        struct vm_page *page;
        bus_addr_t addr;
        paddr_t paddr;
        vm_prot_t mapprot;
        int pmap_flags;
        boolean_t locked = TRUE;
        int ret;
        int i;
        unsigned long address = (unsigned long)vaddr;
        int retval = VM_PAGER_OK;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        /*
         * Work around the locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        ret = ttm_bo_reserve(bo, true, true, false, NULL);
        if (unlikely(ret != 0)) {
                uvmfault_unlockall(ufi, NULL, uobj, NULL);
                ret = ttm_bo_reserve(bo, true, false, false, 0);
                locked = uvmfault_relock(ufi);
                if (!locked)
                        return VM_PAGER_REFAULT;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                retval = VM_PAGER_ERROR;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        retval = VM_PAGER_REFAULT;
                        goto out_unlock;
                default:
                        retval = VM_PAGER_ERROR;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo);
        if (unlikely(ret != 0)) {
                retval = (ret != -ERESTARTSYS) ?
                    VM_PAGER_ERROR : VM_PAGER_REFAULT;
                goto out_unlock;
        }

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_REFAULT;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        page_offset = ((address - ufi->entry->start) >> PAGE_SHIFT) +
            drm_vma_node_start(&bo->vma_node) - (ufi->entry->offset >> PAGE_SHIFT);
        page_last = ((ufi->entry->end - ufi->entry->start) >> PAGE_SHIFT) +
            drm_vma_node_start(&bo->vma_node) - (ufi->entry->offset >> PAGE_SHIFT);

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_PAGER_ERROR;
                goto out_io_unlock;
        }

        /*
         * Compute the protection and pmap flags for the mapping; this
         * is the UVM counterpart of tweaking vm_page_prot on a local
         * vma copy in the Linux version above.
         */
        mapprot = ufi->entry->protection;
        if (bo->mem.bus.is_iomem) {
                pmap_flags = ttm_io_prot(bo->mem.placement, 0);
        } else {
                ttm = bo->ttm;
                pmap_flags = ttm_io_prot(bo->mem.placement, 0);

                /* Allocate all pages at once, the most common usage */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_PAGER_ERROR;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * the first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem) {
                        addr = bo->mem.bus.base + bo->mem.bus.offset;
                        paddr = bus_space_mmap(bdev->memt, addr,
                                               page_offset << PAGE_SHIFT,
                                               mapprot, 0);
                } else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_PAGER_ERROR;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        paddr = VM_PAGE_TO_PHYS(page);
                }

                ret = pmap_enter(ufi->orig_map->pmap, vaddr,
                    paddr | pmap_flags, mapprot, PMAP_CANFAIL | mapprot);

                /*
                 * Somebody beat us to this PTE, we prefaulted into an
                 * already populated PTE, or a prefaulting error occurred.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        /*
                         * pmap_enter() failed; unlock, wait for free
                         * pages and have the fault retried.
                         */
                        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
                            NULL, NULL);
                        uvm_wait("ttmflt");
                        return VM_PAGER_REFAULT;
                }

                address += PAGE_SIZE;
                vaddr += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
        pmap_update(ufi->orig_map->pmap);
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL, NULL);
        ttm_bo_unreserve(bo);
        return retval;
}
#endif

#ifdef notyet
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};
#endif

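/*
 * Take a reference on both the UVM object and the underlying TTM
 * buffer object; the two reference counts are kept in step.
 */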
void
ttm_bo_vm_reference(struct uvm_object *uobj)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)uobj;

        (void)ttm_bo_reference(bo);
        uobj->uo_refs++;
}

void
ttm_bo_vm_detach(struct uvm_object *uobj)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)uobj;

        uobj->uo_refs--;
        ttm_bo_unref(&bo);
}

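/* Pager operations hooking TTM buffer objects into the UVM fault path. */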
struct uvm_pagerops ttm_bo_vm_ops = {
        .pgo_fault = ttm_bo_vm_fault,
        .pgo_reference = ttm_bo_vm_reference,
        .pgo_detach = ttm_bo_vm_detach
};

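/*
 * Look up the buffer object covering the given page range in the
 * device's offset space and take a reference, or return NULL if the
 * range does not map to a live object.
 */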
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
                                                  unsigned long offset,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct ttm_buffer_object *bo = NULL;

        drm_vma_offset_lock_lookup(&bdev->vma_manager);

        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object, vma_node);
                if (!kref_get_unless_zero(&bo->kref))
                        bo = NULL;
        }

        drm_vma_offset_unlock_lookup(&bdev->vma_manager);

        if (!bo)
                pr_err("Could not find buffer object to map\n");

        return bo;
}

#ifdef __linux__
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
        if (unlikely(!bo))
                return -EINVAL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;

        /*
         * We'd like to use VM_PFNMAP on shared mappings, where
         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
         * bad for performance. Until that has been sorted out, use
         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
         */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
#else
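/*
 * UVM flavour of ttm_bo_mmap(): resolve the fake offset to a buffer
 * object and hand back its UVM object with an extra reference and the
 * TTM pager ops installed.
 */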
struct uvm_object *
ttm_bo_mmap(voff_t off, vsize_t size, struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        bo = ttm_bo_vm_lookup(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT);
        if (unlikely(!bo))
                return NULL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
#ifdef notyet
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;
#endif

        bo->uobj.pgops = &ttm_bo_vm_ops;
        bo->uobj.uo_refs++;
        return &bo->uobj;
out_unref:
        ttm_bo_unref(&bo);
        return NULL;
}
#endif
EXPORT_SYMBOL(ttm_bo_mmap);
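
/*
 * Illustrative sketch only (not part of this file): a driver's mmap
 * entry point would typically translate a fake offset into a UVM
 * object via ttm_bo_mmap().  The driver_mmap() name and the embedding
 * of the ttm_bo_device in the softc are hypothetical.
 *
 *	struct uvm_object *
 *	driver_mmap(struct drm_device *dev, voff_t off, vsize_t size)
 *	{
 *		struct driver_softc *sc = dev->dev_private;
 *
 *		return ttm_bo_mmap(off, size, &sc->ttm_bdev);
 *	}
 */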

#ifdef notyet
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
#endif