/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_vma_manager.h>
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/linux_ww_mutex.h>

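/*
 * Release the memory node backing the buffer object's current placement.
 */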
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

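/*
 * Fallback move between system memory and a TTM-bindable placement.
 * Rather than copying, this unbinds the TTM from its old (non-system)
 * placement, frees the old memory node, adjusts the page caching
 * attributes, and rebinds the pages at the new placement.
 */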
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

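/*
 * Serialize I/O space reservations for a memory type. Memory types that
 * set io_reserve_fastpath never take the mutex; their reserve/free hooks
 * are expected to be safe without it.
 */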
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

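/*
 * Make room in a limited I/O space by kicking out the least recently
 * used buffer object on the manager's io_reserve LRU, unmapping its
 * CPU mappings in the process.
 */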
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

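/*
 * Reserve I/O space for a memory region, retrying after evicting LRU
 * entries if the driver's io_mem_reserve hook reports the aperture is
 * full (-EAGAIN). Reservations are refcounted via io_reserved_count
 * unless the memory type uses the lock-free fastpath.
 */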
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

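/*
 * Drop one reference to an I/O space reservation taken with
 * ttm_mem_io_reserve(), releasing it through the driver's io_mem_free
 * hook when the count reaches zero.
 */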
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

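/*
 * Reserve/free variants used for CPU (VM fault) mappings. A separate
 * io_reserved_vm flag ensures a mapped buffer object holds exactly one
 * VM reservation, and the object sits on the eviction LRU while that
 * reservation is held.
 */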
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

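/*
 * Map a memory region into kernel virtual address space for a CPU copy.
 * In this port the mapping is established with bus_space_map(9), using
 * BUS_SPACE_MAP_PREFETCHABLE for write-combined placements. On success
 * *virtual holds the kernel va, or NULL for regions that are not iomem.
 * ttm_mem_reg_iounmap() undoes both the mapping and the I/O reservation.
 */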
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;
        int flags;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        flags = BUS_SPACE_MAP_PREFETCHABLE;
                else
                        flags = 0;

                if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
                    mem->bus.size, BUS_SPACE_MAP_LINEAR | flags,
                    &mem->bus.bsh)) {
                        printf("%s bus_space_map failed\n", __func__);
                        return -ENOMEM;
                }

                addr = bus_space_vaddr(bdev->memt, mem->bus.bsh);

                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                bus_space_unmap(bdev->memt, mem->bus.bsh, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

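/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): iomem to iomem,
 * iomem to a TTM page, and a TTM page to iomem. The TTM-page variants
 * temporarily map the page with the caching attributes in 'prot' so
 * the CPU copy respects the source/destination placement.
 */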
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct vm_page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst, PAGE_SIZE);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct vm_page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src, PAGE_SIZE);
        else
                kunmap(s);
#endif

        return 0;
}

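/*
 * CPU fallback for moving a buffer between placements when no
 * accelerated (GPU) copy is available. Both ends are mapped with
 * ttm_mem_reg_ioremap(), the pages are copied one at a time (back to
 * front when source and destination overlap within the same memory
 * type), and the TTM is destroyed when the destination is fixed memory
 * that needs no page backing.
 */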
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}

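/*
 * Illustrative sketch (not taken from a real driver): a typical caller
 * schedules a GPU blit, then hands the old placement to a ghost object
 * so the move need not stall. ttm_bo_move_accel_cleanup() below does
 * exactly this; the outline is roughly:
 *
 *      struct ttm_buffer_object *ghost_obj;
 *      int ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 *      if (ret)
 *              return ret;
 *      reservation_object_add_excl_fence(ghost_obj->resv, fence);
 *      ttm_bo_unreserve(ghost_obj);
 *      ttm_bo_unref(&ghost_obj);  // freed once the fence signals
 */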
#ifdef __linux__
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
#endif

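/*
 * Simplified variant used by this port: write-combine when the
 * placement asks for it, otherwise uncached, with no per-architecture
 * special cases.
 */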
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);

        return tmp;
}

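/*
 * Map the iomem backing a buffer object for ttm_bo_kmap(). Premapped
 * regions reuse mem->bus.addr directly; everything else goes through
 * bus_space_map(9), prefetchable for write-combined placements.
 */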
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        int flags;
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        flags = BUS_SPACE_MAP_PREFETCHABLE;
                else
                        flags = 0;

                if (bus_space_map(bo->bdev->memt,
                    mem->bus.base + bo->mem.bus.offset + offset,
                    size, BUS_SPACE_MAP_LINEAR | flags,
                    &bo->mem.bus.bsh)) {
                        printf("%s bus_space_map failed\n", __func__);
                        map->virtual = 0;
                } else
                        map->virtual = bus_space_vaddr(bo->bdev->memt,
                            bo->mem.bus.bsh);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

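/*
 * Map system-memory pages of a buffer object for ttm_bo_kmap(): a
 * single cached page can be kmapped directly; anything else needs vmap
 * to get contiguity and the right caching attributes.
 */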
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

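/*
 * Map num_pages of a buffer object, starting at start_page, into kernel
 * virtual address space. Reserves I/O space if needed and dispatches to
 * ttm_bo_ioremap() for iomem or ttm_bo_kmap_ttm() for system pages.
 */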
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

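/*
 * Tear down a mapping created by ttm_bo_kmap(), using the unmap
 * primitive that matches how the mapping was established, then drop
 * the I/O space reservation.
 */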
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                bus_space_unmap(bo->bdev->memt, bo->mem.bus.bsh,
                    bo->mem.bus.size);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual, bo->mem.bus.size);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->virtual);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

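/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): mapping a buffer object for a CPU write and unmapping it
 * again. ttm_kmap_obj_virtual() reports whether the returned pointer
 * is iomem, which dictates which access primitives a caller may use:
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *      if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *              void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *              if (!is_iomem)
 *                      memset(virt, 0, bo->num_pages << PAGE_SHIFT);
 *              ttm_bo_kunmap(&map);
 *      }
 */

/*
 * Finish an accelerated (GPU) move that has already been scheduled and
 * fenced. Evictions wait for the copy to complete; ordinary moves are
 * pipelined by parking the old placement on a ghost object (see
 * ttm_buffer_object_transfer() above) that is released when the fence
 * signals.
 */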
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        reservation_object_add_excl_fence(bo->resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
