/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_linux.h>
#include <dev/pci/drm/drm_linux_atomic.h>
#include <dev/pci/drm/linux_rcupdate.h>
#include <dev/pci/drm/linux_ww_mutex.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

#ifdef notyet
static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};
#endif

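/*
 * OpenBSD: there is no sysfs to attach to (the Linux sysfs bits in this
 * file are fenced off with "notyet"), so hand back a NULL kobject.
 */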
struct kobject *
ttm_get_kobj(void)
{
        return (NULL);
}

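/*
 * Extract the memory type index (TTM_PL_*) encoded as a single bit in a
 * placement's flags.
 */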
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
                                          uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (place->flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

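/*
 * Dump the state of a memory type manager to the kernel log.
 */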
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        pr_err("    has_type: %d\n", man->has_type);
        pr_err("    use_type: %d\n", man->use_type);
        pr_err("    flags: 0x%08X\n", man->flags);
        pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
        pr_err("    size: %llu\n", man->size);
        pr_err("    available_caching: 0x%08X\n", man->available_caching);
        pr_err("    default_caching: 0x%08X\n", man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement)
{
        int i, ret, mem_type;

        pr_err("No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_place(&placement->placement[i],
                                                &mem_type);
                if (ret)
                        return;
                pr_err("  placement[%d]=0x%08X (%d)\n",
                       i, placement->placement[i].flags, mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

#ifdef notyet
static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};
#endif

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
#ifdef __linux__
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
#endif
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

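/*
 * Final release of the last list reference: sanity-check that the bo is
 * off all lists, destroy its ttm and reservation object, free the bo and
 * return its accounted size to the global memory accounting.
 */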
static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;
        size_t acc_size = bo->acc_size;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->resv == &bo->ttm_resv)
                reservation_object_fini(&bo->ttm_resv);
        mutex_destroy(&bo->wu_mutex);
        if (bo->destroy)
                bo->destroy(bo);
        else
                kfree(bo);
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

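/*
 * Put an unpinned bo on its memory type manager's LRU list and, if it has
 * backing pages, on the global swap LRU; a list reference is taken for
 * each list the bo joins.
 */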
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

#ifdef notyet
        lockdep_assert_held(&bo->resv->lock.base);
#endif

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
                         bool never_free)
{
        kref_sub(&bo->list_kref, count,
                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
        int put_count;

        spin_lock(&bo->glob->lru_lock);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&bo->glob->lru_lock);
        ttm_bo_list_ref_sub(bo, put_count, true);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
     262           0 :                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
     263           0 :                                                       page_flags, glob->dummy_read_page);
     264           0 :                 if (unlikely(bo->ttm == NULL))
     265           0 :                         ret = -ENOMEM;
     266             :                 break;
     267             :         case ttm_bo_type_sg:
     268           0 :                 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
     269           0 :                                                       page_flags | TTM_PAGE_FLAG_SG,
     270           0 :                                                       glob->dummy_read_page);
     271           0 :                 if (unlikely(bo->ttm == NULL)) {
     272             :                         ret = -ENOMEM;
     273           0 :                         break;
     274             :                 }
     275           0 :                 bo->ttm->sg = bo->sg;
     276           0 :                 break;
     277             :         default:
     278           0 :                 pr_err("Illegal buffer object type\n");
     279             :                 ret = -EINVAL;
     280           0 :                 break;
     281             :         }
     282             : 
     283           0 :         return ret;
     284             : }
     285             : 
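/*
 * Move the bo's backing store into @mem: tear down CPU mappings when the
 * PCI status or caching changes, create and bind a ttm if the new memory
 * type needs one, then move the data with ttm_bo_move_ttm, the driver's
 * move hook, or the memcpy fallback.  On failure, move_notify is replayed
 * with the original placement so the driver can undo its bookkeeping.
 */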
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
                ret = ttm_mem_io_lock(old_man, true);
                if (unlikely(ret != 0))
                        goto out_err;
                ttm_bo_unmap_virtual_locked(bo);
                ttm_mem_io_unlock(old_man);
        }

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
                        bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
                        ret = ttm_bo_add_ttm(bo, zero);
                        if (ret)
                                goto out_err;
                }

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        if (bdev->driver->move_notify)
                                bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

        if (ret) {
                if (bdev->driver->move_notify) {
                        struct ttm_mem_reg tmp_mem = *mem;
                        *mem = bo->mem;
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        *mem = tmp_mem;
                }

                goto out_err;
        }

moved:
        if (bo->evicted) {
                if (bdev->driver->invalidate_caches) {
                        ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                        if (ret)
                                pr_err("Can not flush read caches\n");
                }
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * Call while the buffer object is reserved.
 * Will release GPU memory type usage on destruction.
 * This is the place to put driver-specific hooks that release
 * driver-private resources.
 * Will release the bo's reservation lock before returning.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);

        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
        ttm_bo_mem_put(bo, &bo->mem);

        ww_mutex_unlock(&bo->resv->lock);
}

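/*
 * Enable software signaling on the exclusive fence and on every shared
 * fence of the bo's reservation object, so that delayed destruction can
 * make progress once the fences signal.
 */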
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
        struct reservation_object_list *fobj;
        struct fence *fence;
        int i;

        fobj = reservation_object_get_list(bo->resv);
        fence = reservation_object_get_excl(bo->resv);
        if (fence && !fence->ops->signaled)
                fence_enable_sw_signaling(fence);

        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference_protected(fobj->shared[i],
                                        reservation_object_held(bo->resv));

                if (!fence->ops->signaled)
                        fence_enable_sw_signaling(fence);
        }
}

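/*
 * If the bo is idle and can be reserved without blocking, release its
 * memory type resources right away; otherwise put it on the device's
 * delayed-destroy list and kick the cleanup workqueue.
 */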
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = __ttm_bo_reserve(bo, false, true, false, NULL);

        if (!ret) {
                if (!ttm_bo_wait(bo, false, false, true)) {
                        put_count = ttm_bo_del_from_lru(bo);

                        spin_unlock(&glob->lru_lock);
                        ttm_bo_cleanup_memtype_use(bo);

                        ttm_bo_list_ref_sub(bo, put_count, true);

                        return;
                } else
                        ttm_bo_flush_all_fences(bo);

                /*
                 * Make NO_EVICT bos immediately available to
                 * shrinkers, now that they are queued for
                 * destruction.
                 */
                if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
                        bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
                        ttm_bo_add_to_lru(bo);
                }

                __ttm_bo_unreserve(bo);
        }

        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If the bo is idle, remove it from the delayed-destroy and LRU lists
 * and drop its list reference.  If it is not idle, do nothing.
 *
 * Must be called with lru_lock and the reservation held; this function
 * will drop both before returning.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_gpu           Never wait for the GPU. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret;

        ret = ttm_bo_wait(bo, false, false, true);

        if (ret && !no_wait_gpu) {
                long lret;
                ww_mutex_unlock(&bo->resv->lock);
                spin_unlock(&glob->lru_lock);

                lret = reservation_object_wait_timeout_rcu(bo->resv,
                                                           true,
                                                           interruptible,
                                                           30 * HZ);

                if (lret < 0)
                        return lret;
                else if (lret == 0)
                        return -EBUSY;

                spin_lock(&glob->lru_lock);
                ret = __ttm_bo_reserve(bo, false, true, false, NULL);

                /*
                 * We raced and lost; someone else holds the reservation now,
                 * and is probably busy in ttm_bo_cleanup_memtype_use.
                 *
                 * Even if that's not the case, any delayed destruction would
                 * succeed now that we have finished waiting, so just return
                 * success here.
                 */
                if (ret) {
                        spin_unlock(&glob->lru_lock);
                        return 0;
                }

                /*
                 * The wait should have finished, and no new wait object
                 * should have been added since; re-check with ttm_bo_wait.
                 */
                ret = ttm_bo_wait(bo, false, false, true);
                WARN_ON(ret);
        }

        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                __ttm_bo_unreserve(bo);
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        put_count = ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        ++put_count;

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock on
 * all buffers encountered.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                ret = __ttm_bo_reserve(entry, false, true, false, NULL);
                if (remove_all && ret) {
                        spin_unlock(&glob->lru_lock);
                        ret = __ttm_bo_reserve(entry, false, false,
                                               false, NULL);
                        spin_lock(&glob->lru_lock);
                }

                if (!ret)
                        ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
                                                             !remove_all);
                else
                        spin_unlock(&glob->lru_lock);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

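/*
 * Delayed-destroy work item: retry the cleanup and reschedule ourselves
 * while buffers remain on the delayed-destroy list.
 */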
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

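/*
 * Final release of the bo's kref: drop the mmap offset, release any
 * io-reserved VM space, then clean the bo up or queue it for delayed
 * destruction.
 */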
static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;

        *p_bo = NULL;
        kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

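/*
 * Evict a bo: wait for it to idle, ask the driver for acceptable eviction
 * placements, then find space there and move the buffer over.
 */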
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to expire sync object before buffer eviction\n");
                }
                goto out;
        }

#ifdef notyet
        lockdep_assert_held(&bo->resv->lock.base);
#endif

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                                no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
                               bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

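/*
 * Evict the first bo on @mem_type's LRU list that can be reserved without
 * blocking, skipping buffers that lie entirely outside the requested
 * placement range.
 */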
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
                                const struct ttm_place *place,
                                bool interruptible,
                                bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret = -EBUSY, put_count;

        spin_lock(&glob->lru_lock);
        list_for_each_entry(bo, &man->lru, lru) {
                ret = __ttm_bo_reserve(bo, false, true, false, NULL);
                if (!ret) {
                        if (place && (place->fpfn || place->lpfn)) {
                                /* Don't evict this BO if it's outside of the
                                 * requested placement range
                                 */
                                if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
                                    (place->lpfn && place->lpfn <= bo->mem.start)) {
                                        __ttm_bo_unreserve(bo);
                                        ret = -EBUSY;
                                        continue;
                                }
                        }

                        break;
                }
        }

        if (ret) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
                                                     no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        ttm_bo_list_ref_sub(bo, put_count, true);

        ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

        if (mem->mm_node)
                (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        uint32_t mem_type,
                                        const struct ttm_place *place,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
                                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, mem_type, place,
                                          interruptible, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (mem->mm_node == NULL)
                return -ENOMEM;
        mem->mem_type = mem_type;
        return 0;
}

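/*
 * Pick caching flags for a new placement, preferring the bo's current
 * caching, then the manager's default, then cached over write-combined
 * over uncached.
 */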
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

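/*
 * Check whether a placement is satisfiable by a memory type, and return
 * the placement flags masked against what the manager can offer.
 */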
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 uint32_t mem_type,
                                 const struct ttm_place *place,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((place->flags & man->available_caching) == 0)
                return false;

        cur_flags |= (place->flags & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
                        bool interruptible,
                        bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];

                ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type || !man->use_type)
                        continue;

                type_ok = ttm_bo_mt_compatible(man, mem_type, place,
                                                &cur_flags);

                if (!type_ok)
                        continue;

                type_found = true;
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Fold the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret))
                        return ret;

                if (mem->mm_node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        for (i = 0; i < placement->num_busy_placement; ++i) {
                const struct ttm_place *place = &placement->busy_placement[i];

                ret = ttm_mem_type_from_place(place, &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type || !man->use_type)
                        continue;
                if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
                        continue;

                type_found = true;
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Fold the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, place->flags,
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
                                                interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }

        if (!type_found) {
                printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
                return -EINVAL;
        }

        return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

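/*
 * Move a reserved buffer to a place allowed by @placement: wait for idle,
 * find space with ttm_bo_mem_space and hand off to ttm_bo_handle_move_mem,
 * putting the new node back on failure.
 */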
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        bool interruptible,
                        bool no_wait_gpu)
{
        int ret = 0;
        struct ttm_mem_reg mem;

#ifdef notyet
        lockdep_assert_held(&bo->resv->lock.base);
#endif

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem,
                               interruptible, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false,
                                     interruptible, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
        return ret;
}

    1021           0 : bool ttm_bo_mem_compat(struct ttm_placement *placement,
    1022             :                        struct ttm_mem_reg *mem,
    1023             :                        uint32_t *new_flags)
    1024             : {
    1025             :         int i;
    1026             : 
    1027           0 :         for (i = 0; i < placement->num_placement; i++) {
    1028           0 :                 const struct ttm_place *heap = &placement->placement[i];
    1029           0 :                 if (mem->mm_node &&
    1030           0 :                     (mem->start < heap->fpfn ||
    1031           0 :                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
    1032           0 :                         continue;
    1033             : 
    1034           0 :                 *new_flags = heap->flags;
    1035           0 :                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
    1036           0 :                     (*new_flags & mem->placement & TTM_PL_MASK_MEM))
    1037           0 :                         return true;
    1038           0 :         }
    1039             : 
    1040           0 :         for (i = 0; i < placement->num_busy_placement; i++) {
    1041           0 :                 const struct ttm_place *heap = &placement->busy_placement[i];
    1042           0 :                 if (mem->mm_node &&
    1043           0 :                     (mem->start < heap->fpfn ||
    1044           0 :                      (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
    1045           0 :                         continue;
    1046             : 
    1047           0 :                 *new_flags = heap->flags;
    1048           0 :                 if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
    1049           0 :                     (*new_flags & mem->placement & TTM_PL_MASK_MEM))
    1050           0 :                         return true;
    1051           0 :         }
    1052             : 
    1053           0 :         return false;
    1054           0 : }
    1055             : EXPORT_SYMBOL(ttm_bo_mem_compat);
    1056             : 
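/*
 * A minimal sketch (hypothetical helper) of probing a single-entry
 * placement with ttm_bo_mem_compat() before paying for a full
 * validate; the ttm_place fields match the ones read above.
 */
static bool example_is_system_cached(struct ttm_buffer_object *bo)
{
        struct ttm_place place = {
                .fpfn = 0,
                .lpfn = 0,      /* 0: no upper page-frame bound */
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        };
        struct ttm_placement placement = {
                .num_placement = 1,
                .placement = &place,
                .num_busy_placement = 1,
                .busy_placement = &place,
        };
        uint32_t new_flags;

        return ttm_bo_mem_compat(&placement, &bo->mem, &new_flags);
}
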
    1057           0 : int ttm_bo_validate(struct ttm_buffer_object *bo,
    1058             :                         struct ttm_placement *placement,
    1059             :                         bool interruptible,
    1060             :                         bool no_wait_gpu)
    1061             : {
    1062             :         int ret;
    1063           0 :         uint32_t new_flags;
    1064             : 
    1065             : #ifdef notyet
    1066             :         lockdep_assert_held(&bo->resv->lock.base);
    1067             : #endif
    1068             :         /*
    1069             :          * Check whether we need to move buffer.
    1070             :          */
    1071           0 :         if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
    1072           0 :                 ret = ttm_bo_move_buffer(bo, placement, interruptible,
    1073             :                                          no_wait_gpu);
    1074           0 :                 if (ret)
    1075           0 :                         return ret;
    1076             :         } else {
    1077             :                 /*
     1078             :                  * Apply the access and other non-mapping-related flag bits
     1079             :                  * from the compatible memory placement flags to the active flags.
    1080             :                  */
    1081           0 :                 ttm_flag_masked(&bo->mem.placement, new_flags,
    1082             :                                 ~TTM_PL_MASK_MEMTYPE);
    1083             :         }
    1084             :         /*
    1085             :          * We might need to add a TTM.
    1086             :          */
    1087           0 :         if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
    1088           0 :                 ret = ttm_bo_add_ttm(bo, true);
    1089           0 :                 if (ret)
    1090           0 :                         return ret;
    1091             :         }
    1092           0 :         return 0;
    1093           0 : }
    1094             : EXPORT_SYMBOL(ttm_bo_validate);
    1095             : 
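/*
 * A minimal sketch (hypothetical helper) of the usual
 * reserve/validate/unreserve pattern; ttm_bo_validate() must be
 * called with the reservation held, as the lockdep assert documents.
 */
static int example_move_to(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement)
{
        /* interruptible, blocking, no sequence ticket */
        int ret = ttm_bo_reserve(bo, true, false, false, NULL);

        if (unlikely(ret != 0))
                return ret;
        ret = ttm_bo_validate(bo, placement, true, false);
        ttm_bo_unreserve(bo);
        return ret;
}
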
    1096           0 : int ttm_bo_init(struct ttm_bo_device *bdev,
    1097             :                 struct ttm_buffer_object *bo,
    1098             :                 unsigned long size,
    1099             :                 enum ttm_bo_type type,
    1100             :                 struct ttm_placement *placement,
    1101             :                 uint32_t page_alignment,
    1102             :                 bool interruptible,
    1103             :                 struct uvm_object *persistent_swap_storage,
    1104             :                 size_t acc_size,
    1105             :                 struct sg_table *sg,
    1106             :                 struct reservation_object *resv,
    1107             :                 void (*destroy) (struct ttm_buffer_object *))
    1108             : {
    1109             :         int ret = 0;
    1110             :         unsigned long num_pages;
    1111           0 :         struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
    1112             :         bool locked;
    1113             : 
    1114           0 :         ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
    1115           0 :         if (ret) {
    1116           0 :                 pr_err("Out of kernel memory\n");
    1117           0 :                 if (destroy)
    1118           0 :                         (*destroy)(bo);
    1119             :                 else
    1120           0 :                         kfree(bo);
    1121           0 :                 return -ENOMEM;
    1122             :         }
    1123             : 
    1124           0 :         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    1125           0 :         if (num_pages == 0) {
    1126           0 :                 pr_err("Illegal buffer object size\n");
    1127           0 :                 if (destroy)
    1128           0 :                         (*destroy)(bo);
    1129             :                 else
    1130           0 :                         kfree(bo);
    1131           0 :                 ttm_mem_global_free(mem_glob, acc_size);
    1132           0 :                 return -EINVAL;
    1133             :         }
    1134           0 :         bo->destroy = destroy;
    1135             : 
    1136           0 :         uvm_objinit(&bo->uobj, NULL, 0);
    1137           0 :         kref_init(&bo->kref);
    1138           0 :         kref_init(&bo->list_kref);
    1139           0 :         atomic_set(&bo->cpu_writers, 0);
    1140           0 :         INIT_LIST_HEAD(&bo->lru);
    1141           0 :         INIT_LIST_HEAD(&bo->ddestroy);
    1142           0 :         INIT_LIST_HEAD(&bo->swap);
    1143           0 :         INIT_LIST_HEAD(&bo->io_reserve_lru);
    1144           0 :         rw_init(&bo->wu_mutex, "ttmwu");
    1145           0 :         bo->bdev = bdev;
    1146           0 :         bo->glob = bdev->glob;
    1147           0 :         bo->type = type;
    1148           0 :         bo->num_pages = num_pages;
    1149           0 :         bo->mem.size = num_pages << PAGE_SHIFT;
    1150           0 :         bo->mem.mem_type = TTM_PL_SYSTEM;
    1151           0 :         bo->mem.num_pages = bo->num_pages;
    1152           0 :         bo->mem.mm_node = NULL;
    1153           0 :         bo->mem.page_alignment = page_alignment;
    1154           0 :         bo->mem.bus.io_reserved_vm = false;
    1155           0 :         bo->mem.bus.io_reserved_count = 0;
    1156           0 :         bo->priv_flags = 0;
    1157           0 :         bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
    1158           0 :         bo->persistent_swap_storage = persistent_swap_storage;
    1159           0 :         bo->acc_size = acc_size;
    1160           0 :         bo->sg = sg;
    1161           0 :         if (resv) {
    1162           0 :                 bo->resv = resv;
    1163             : #ifdef notyet
    1164             :                 lockdep_assert_held(&bo->resv->lock.base);
    1165             : #endif
    1166           0 :         } else {
    1167           0 :                 bo->resv = &bo->ttm_resv;
    1168           0 :                 reservation_object_init(&bo->ttm_resv);
    1169             :         }
    1170           0 :         atomic_inc(&bo->glob->bo_count);
    1171           0 :         drm_vma_node_reset(&bo->vma_node);
    1172             : 
    1173             :         /*
    1174             :          * For ttm_bo_type_device buffers, allocate
    1175             :          * address space from the device.
    1176             :          */
    1177           0 :         if (bo->type == ttm_bo_type_device ||
    1178           0 :             bo->type == ttm_bo_type_sg)
    1179           0 :                 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
    1180           0 :                                          bo->mem.num_pages);
    1181             : 
     1182             :         /* Passed reservation objects should already be locked,
    1183             :          * since otherwise lockdep will be angered in radeon.
    1184             :          */
    1185           0 :         if (!resv) {
    1186           0 :                 locked = ww_mutex_trylock(&bo->resv->lock);
    1187           0 :                 WARN_ON(!locked);
    1188           0 :         }
    1189             : 
    1190           0 :         if (likely(!ret))
    1191           0 :                 ret = ttm_bo_validate(bo, placement, interruptible, false);
    1192             : 
    1193           0 :         if (!resv)
    1194           0 :                 ttm_bo_unreserve(bo);
    1195             : 
    1196           0 :         if (unlikely(ret))
    1197           0 :                 ttm_bo_unref(&bo);
    1198             : 
    1199           0 :         return ret;
    1200           0 : }
    1201             : EXPORT_SYMBOL(ttm_bo_init);
    1202             : 
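/*
 * A minimal sketch of the usual driver pattern around ttm_bo_init():
 * embed the TTM object in a driver structure and pass a destroy
 * callback, since ttm_bo_init() disposes of the object itself on
 * every failure path. All example_* names are hypothetical.
 */
struct example_bo {
        struct ttm_buffer_object base;
        /* driver-private fields would follow */
};

static void example_bo_destroy(struct ttm_buffer_object *bo)
{
        kfree(container_of(bo, struct example_bo, base));
}

static int example_bo_new(struct ttm_bo_device *bdev, unsigned long size,
                          struct ttm_placement *placement,
                          struct example_bo **p_ebo)
{
        struct example_bo *ebo;
        size_t acc_size;
        int ret;

        ebo = kzalloc(sizeof(*ebo), GFP_KERNEL);
        if (unlikely(ebo == NULL))
                return -ENOMEM;
        acc_size = ttm_bo_acc_size(bdev, size, sizeof(*ebo));
        ret = ttm_bo_init(bdev, &ebo->base, size, ttm_bo_type_device,
                          placement, 0 /* page_alignment */, true, NULL,
                          acc_size, NULL, NULL, example_bo_destroy);
        if (likely(ret == 0))
                *p_ebo = ebo;
        /* on error, ttm_bo_init() has already called example_bo_destroy() */
        return ret;
}
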
    1203           0 : size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
    1204             :                        unsigned long bo_size,
    1205             :                        unsigned struct_size)
    1206             : {
    1207           0 :         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
    1208             :         size_t size = 0;
    1209             : 
    1210           0 :         size += ttm_round_pot(struct_size);
    1211           0 :         size += PAGE_ALIGN(npages * sizeof(void *));
    1212           0 :         size += ttm_round_pot(sizeof(struct ttm_tt));
    1213           0 :         return size;
    1214             : }
    1215             : EXPORT_SYMBOL(ttm_bo_acc_size);
    1216             : 
    1217           0 : size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
    1218             :                            unsigned long bo_size,
    1219             :                            unsigned struct_size)
    1220             : {
    1221           0 :         unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
    1222             :         size_t size = 0;
    1223             : 
    1224           0 :         size += ttm_round_pot(struct_size);
    1225           0 :         size += PAGE_ALIGN(npages * sizeof(void *));
    1226           0 :         size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
    1227           0 :         size += ttm_round_pot(sizeof(struct ttm_dma_tt));
    1228           0 :         return size;
    1229             : }
    1230             : EXPORT_SYMBOL(ttm_bo_dma_acc_size);
    1231             : 
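/*
 * A worked example of the accounting above, assuming 4 KiB pages and
 * 8-byte pointers (assumptions, not stated in this file): a 1 MiB
 * buffer has npages = 256, so the page-pointer array costs
 * PAGE_ALIGN(256 * 8) = 4096 bytes. ttm_bo_acc_size() adds
 * ttm_round_pot(struct_size) plus ttm_round_pot(sizeof(struct ttm_tt));
 * ttm_bo_dma_acc_size() further adds PAGE_ALIGN(256 * 8) = 4096 bytes
 * for the dma_addr_t array and accounts a ttm_dma_tt instead.
 */
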
    1232           0 : int ttm_bo_create(struct ttm_bo_device *bdev,
    1233             :                         unsigned long size,
    1234             :                         enum ttm_bo_type type,
    1235             :                         struct ttm_placement *placement,
    1236             :                         uint32_t page_alignment,
    1237             :                         bool interruptible,
    1238             :                         struct uvm_object *persistent_swap_storage,
    1239             :                         struct ttm_buffer_object **p_bo)
    1240             : {
    1241             :         struct ttm_buffer_object *bo;
    1242             :         size_t acc_size;
    1243             :         int ret;
    1244             : 
    1245           0 :         bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    1246           0 :         if (unlikely(bo == NULL))
    1247           0 :                 return -ENOMEM;
    1248             : 
    1249           0 :         acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
    1250           0 :         ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
    1251             :                           interruptible, persistent_swap_storage, acc_size,
    1252             :                           NULL, NULL, NULL);
    1253           0 :         if (likely(ret == 0))
    1254           0 :                 *p_bo = bo;
    1255             : 
    1256           0 :         return ret;
    1257           0 : }
    1258             : EXPORT_SYMBOL(ttm_bo_create);
    1259             : 
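/*
 * A minimal sketch (hypothetical helper) of the convenience path:
 * when no driver-private wrapper is needed, ttm_bo_create()
 * allocates the ttm_buffer_object itself and kfree() is the
 * implicit destructor.
 */
static int example_create_kernel_bo(struct ttm_bo_device *bdev,
                                    unsigned long size,
                                    struct ttm_placement *placement,
                                    struct ttm_buffer_object **p_bo)
{
        /* no alignment constraint, interruptible, no swap backing */
        return ttm_bo_create(bdev, size, ttm_bo_type_kernel, placement,
                             0, true, NULL, p_bo);
}
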
    1260           0 : static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
    1261             :                                         unsigned mem_type, bool allow_errors)
    1262             : {
    1263           0 :         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
    1264           0 :         struct ttm_bo_global *glob = bdev->glob;
    1265             :         int ret;
    1266             : 
    1267             :         /*
    1268             :          * Can't use standard list traversal since we're unlocking.
    1269             :          */
    1270             : 
    1271           0 :         spin_lock(&glob->lru_lock);
    1272           0 :         while (!list_empty(&man->lru)) {
    1273             :                 spin_unlock(&glob->lru_lock);
    1274           0 :                 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
    1275           0 :                 if (ret) {
    1276           0 :                         if (allow_errors) {
    1277           0 :                                 return ret;
    1278             :                         } else {
    1279           0 :                                 pr_err("Cleanup eviction failed\n");
    1280             :                         }
    1281           0 :                 }
    1282           0 :                 spin_lock(&glob->lru_lock);
    1283             :         }
    1284             :         spin_unlock(&glob->lru_lock);
    1285           0 :         return 0;
    1286           0 : }
    1287             : 
    1288           0 : int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
    1289             : {
    1290             :         struct ttm_mem_type_manager *man;
    1291             :         int ret = -EINVAL;
    1292             : 
    1293           0 :         if (mem_type >= TTM_NUM_MEM_TYPES) {
     1294           0 :                 pr_err("Illegal memory type %u\n", mem_type);
    1295           0 :                 return ret;
    1296             :         }
    1297           0 :         man = &bdev->man[mem_type];
    1298             : 
    1299           0 :         if (!man->has_type) {
    1300           0 :                 pr_err("Trying to take down uninitialized memory manager type %u\n",
    1301             :                        mem_type);
    1302           0 :                 return ret;
    1303             :         }
    1304             : 
    1305           0 :         man->use_type = false;
    1306           0 :         man->has_type = false;
    1307             : 
    1308             :         ret = 0;
    1309           0 :         if (mem_type > 0) {
    1310           0 :                 ttm_bo_force_list_clean(bdev, mem_type, false);
    1311             : 
    1312           0 :                 ret = (*man->func->takedown)(man);
    1313           0 :         }
    1314             : 
    1315           0 :         return ret;
    1316           0 : }
    1317             : EXPORT_SYMBOL(ttm_bo_clean_mm);
    1318             : 
    1319           0 : int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
    1320             : {
    1321           0 :         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
    1322             : 
    1323           0 :         if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
    1324           0 :                 pr_err("Illegal memory manager memory type %u\n", mem_type);
    1325           0 :                 return -EINVAL;
    1326             :         }
    1327             : 
    1328           0 :         if (!man->has_type) {
    1329           0 :                 pr_err("Memory type %u has not been initialized\n", mem_type);
    1330           0 :                 return 0;
    1331             :         }
    1332             : 
    1333           0 :         return ttm_bo_force_list_clean(bdev, mem_type, true);
    1334           0 : }
    1335             : EXPORT_SYMBOL(ttm_bo_evict_mm);
    1336             : 
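/*
 * A minimal sketch (hypothetical helpers; TTM_PL_VRAM is a driver
 * memory type not handled in this file) of how the two calls above
 * are typically used: evict at suspend, clean at teardown.
 */
static int example_suspend_vram(struct ttm_bo_device *bdev)
{
        /* propagates eviction errors back to the suspend path */
        return ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
}

static int example_takedown_vram(struct ttm_bo_device *bdev)
{
        /* logs, but does not return, individual eviction failures */
        return ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
}
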
    1337           0 : int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
    1338             :                         unsigned long p_size)
    1339             : {
    1340             :         int ret = -EINVAL;
    1341             :         struct ttm_mem_type_manager *man;
    1342             : 
    1343           0 :         BUG_ON(type >= TTM_NUM_MEM_TYPES);
    1344           0 :         man = &bdev->man[type];
    1345           0 :         BUG_ON(man->has_type);
    1346           0 :         man->io_reserve_fastpath = true;
    1347           0 :         man->use_io_reserve_lru = false;
    1348           0 :         rw_init(&man->io_reserve_mutex, "ior");
    1349           0 :         INIT_LIST_HEAD(&man->io_reserve_lru);
    1350             : 
    1351           0 :         ret = bdev->driver->init_mem_type(bdev, type, man);
    1352           0 :         if (ret)
    1353           0 :                 return ret;
    1354           0 :         man->bdev = bdev;
    1355             : 
    1356             :         ret = 0;
    1357           0 :         if (type != TTM_PL_SYSTEM) {
    1358           0 :                 ret = (*man->func->init)(man, p_size);
    1359           0 :                 if (ret)
    1360           0 :                         return ret;
    1361             :         }
    1362           0 :         man->has_type = true;
    1363           0 :         man->use_type = true;
    1364           0 :         man->size = p_size;
    1365             : 
    1366           0 :         INIT_LIST_HEAD(&man->lru);
    1367             : 
    1368           0 :         return 0;
    1369           0 : }
    1370             : EXPORT_SYMBOL(ttm_bo_init_mm);
    1371             : 
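/*
 * A minimal sketch (hypothetical helper): enabling an extra memory
 * type after device init. p_size is in pages; the driver's
 * init_mem_type() hook fills in man->func before (*func->init)() runs.
 */
static int example_enable_vram(struct ttm_bo_device *bdev,
                               uint64_t vram_size_bytes)
{
        return ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                              vram_size_bytes >> PAGE_SHIFT);
}
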
    1372           0 : static void ttm_bo_global_kobj_release(struct kobject *kobj)
    1373             : {
    1374             :         struct ttm_bo_global *glob =
    1375           0 :                 container_of(kobj, struct ttm_bo_global, kobj);
    1376             : 
    1377           0 :         ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
    1378           0 :         __free_page(glob->dummy_read_page);
    1379           0 :         kfree(glob);
    1380           0 : }
    1381             : 
    1382           0 : void ttm_bo_global_release(struct drm_global_reference *ref)
    1383             : {
    1384           0 :         struct ttm_bo_global *glob = ref->object;
    1385             : 
    1386           0 :         kobject_del(&glob->kobj);
    1387           0 :         kobject_put(&glob->kobj);
    1388           0 : }
    1389             : EXPORT_SYMBOL(ttm_bo_global_release);
    1390             : 
    1391           0 : int ttm_bo_global_init(struct drm_global_reference *ref)
    1392             : {
    1393             :         struct ttm_bo_global_ref *bo_ref =
    1394           0 :                 container_of(ref, struct ttm_bo_global_ref, ref);
    1395           0 :         struct ttm_bo_global *glob = ref->object;
    1396             :         int ret;
    1397             : 
    1398           0 :         rw_init(&glob->device_list_mutex, "gdl");
    1399           0 :         mtx_init(&glob->lru_lock, IPL_NONE);
    1400           0 :         glob->mem_glob = bo_ref->mem_glob;
    1401           0 :         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
    1402             : 
    1403           0 :         if (unlikely(glob->dummy_read_page == NULL)) {
    1404             :                 ret = -ENOMEM;
    1405           0 :                 goto out_no_drp;
    1406             :         }
    1407             : 
    1408           0 :         INIT_LIST_HEAD(&glob->swap_lru);
    1409           0 :         INIT_LIST_HEAD(&glob->device_list);
    1410             : 
    1411           0 :         ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
    1412           0 :         ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
    1413           0 :         if (unlikely(ret != 0)) {
    1414           0 :                 pr_err("Could not register buffer object swapout\n");
    1415             :                 goto out_no_shrink;
    1416             :         }
    1417             : 
    1418           0 :         atomic_set(&glob->bo_count, 0);
    1419             : 
    1420           0 :         ret = kobject_init_and_add(
    1421           0 :                 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
    1422           0 :         if (unlikely(ret != 0))
    1423           0 :                 kobject_put(&glob->kobj);
    1424           0 :         return ret;
    1425             : out_no_shrink:
    1426           0 :         __free_page(glob->dummy_read_page);
    1427             : out_no_drp:
    1428           0 :         kfree(glob);
    1429           0 :         return ret;
    1430           0 : }
    1431             : EXPORT_SYMBOL(ttm_bo_global_init);
    1432             : 
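/*
 * A hedged sketch of how ttm_bo_global_init()/_release() are wired
 * up through the drm_global machinery in drivers of this vintage;
 * drm_global_item_ref() allocates ref->size bytes and invokes the
 * init hook on first reference (treat the details as assumptions).
 */
static int example_global_init(struct ttm_bo_global_ref *bo_ref,
                               struct ttm_mem_global *mem_glob)
{
        struct drm_global_reference *ref = &bo_ref->ref;

        bo_ref->mem_glob = mem_glob;
        ref->global_type = DRM_GLOBAL_TTM_BO;
        ref->size = sizeof(struct ttm_bo_global);
        ref->init = &ttm_bo_global_init;
        ref->release = &ttm_bo_global_release;
        return drm_global_item_ref(ref);
}
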
    1434           0 : int ttm_bo_device_release(struct ttm_bo_device *bdev)
    1435             : {
    1436             :         int ret = 0;
    1437             :         unsigned i = TTM_NUM_MEM_TYPES;
    1438             :         struct ttm_mem_type_manager *man;
    1439           0 :         struct ttm_bo_global *glob = bdev->glob;
    1440             : 
    1441           0 :         while (i--) {
    1442           0 :                 man = &bdev->man[i];
    1443           0 :                 if (man->has_type) {
    1444           0 :                         man->use_type = false;
    1445           0 :                         if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
    1446             :                                 ret = -EBUSY;
     1447           0 :                                 pr_err("DRM memory manager type %u is not clean\n",
    1448             :                                        i);
    1449           0 :                         }
    1450           0 :                         man->has_type = false;
    1451           0 :                 }
    1452             :         }
    1453             : 
    1454           0 :         mutex_lock(&glob->device_list_mutex);
    1455           0 :         list_del(&bdev->device_list);
    1456           0 :         mutex_unlock(&glob->device_list_mutex);
    1457             : 
    1458           0 :         cancel_delayed_work_sync(&bdev->wq);
    1459             : 
    1460           0 :         while (ttm_bo_delayed_delete(bdev, true))
    1461             :                 ;
    1462             : 
    1463           0 :         spin_lock(&glob->lru_lock);
    1464           0 :         if (list_empty(&bdev->ddestroy))
    1465             :                 TTM_DEBUG("Delayed destroy list was clean\n");
    1466             : 
    1467           0 :         if (list_empty(&bdev->man[0].lru))
    1468             :                 TTM_DEBUG("Swap list was clean\n");
    1469           0 :         spin_unlock(&glob->lru_lock);
    1470             : 
    1471           0 :         drm_vma_offset_manager_destroy(&bdev->vma_manager);
    1472             : 
    1473           0 :         return ret;
    1474             : }
    1475             : EXPORT_SYMBOL(ttm_bo_device_release);
    1476             : 
    1477           0 : int ttm_bo_device_init(struct ttm_bo_device *bdev,
    1478             :                        struct ttm_bo_global *glob,
    1479             :                        struct ttm_bo_driver *driver,
    1480             :                        struct address_space *mapping,
    1481             :                        uint64_t file_page_offset,
    1482             :                        bool need_dma32)
    1483             : {
    1484             :         int ret = -EINVAL;
    1485             : 
    1486           0 :         bdev->driver = driver;
    1487             : 
    1488           0 :         memset(bdev->man, 0, sizeof(bdev->man));
    1489             : 
    1490             :         /*
    1491             :          * Initialize the system memory buffer type.
    1492             :          * Other types need to be driver / IOCTL initialized.
    1493             :          */
    1494           0 :         ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
    1495           0 :         if (unlikely(ret != 0))
    1496             :                 goto out_no_sys;
    1497             : 
    1498           0 :         drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
    1499             :                                     0x10000000);
    1500           0 :         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
    1501           0 :         INIT_LIST_HEAD(&bdev->ddestroy);
    1502           0 :         bdev->dev_mapping = mapping;
    1503           0 :         bdev->glob = glob;
    1504           0 :         bdev->need_dma32 = need_dma32;
    1505           0 :         bdev->val_seq = 0;
    1506           0 :         mutex_lock(&glob->device_list_mutex);
    1507           0 :         list_add_tail(&bdev->device_list, &glob->device_list);
    1508           0 :         mutex_unlock(&glob->device_list_mutex);
    1509             : 
    1510           0 :         return 0;
    1511             : out_no_sys:
    1512           0 :         return ret;
    1513           0 : }
    1514             : EXPORT_SYMBOL(ttm_bo_device_init);
    1515             : 
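/*
 * A minimal sketch (hypothetical names): bringing up a device. The
 * offset constant is driver-chosen; starting mmap offsets high keeps
 * buffer mappings clear of other mappings on the same device file.
 */
#define EXAMPLE_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int example_device_init(struct ttm_bo_device *bdev,
                               struct ttm_bo_global *glob,
                               struct ttm_bo_driver *driver,
                               struct address_space *mapping)
{
        return ttm_bo_device_init(bdev, glob, driver, mapping,
                                  EXAMPLE_FILE_PAGE_OFFSET, false);
}
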
    1516             : /*
    1517             :  * buffer object vm functions.
    1518             :  */
    1519             : 
    1520           0 : bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
    1521             : {
    1522           0 :         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    1523             : 
    1524           0 :         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
    1525           0 :                 if (mem->mem_type == TTM_PL_SYSTEM)
    1526           0 :                         return false;
    1527             : 
    1528           0 :                 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
    1529           0 :                         return false;
    1530             : 
    1531           0 :                 if (mem->placement & TTM_PL_FLAG_CACHED)
    1532           0 :                         return false;
    1533             :         }
    1534           0 :         return true;
    1535           0 : }
    1536             : 
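/*
 * Summary of the checks above: a FIXED aperture is always PCI space,
 * while a non-FIXED region counts as PCI space only when it is
 * neither TTM_PL_SYSTEM, nor CMA-backed, nor mapped cached.
 */
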
    1537             : #ifdef __linux__
    1538             : void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
    1539             : {
    1540             :         struct ttm_bo_device *bdev = bo->bdev;
    1541             : 
    1542             :         drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
    1543             :         ttm_mem_io_free_vm(bo);
    1544             : }
    1545             : #else
    1546           0 : void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
    1547             : {
    1548           0 :         struct ttm_tt *ttm = bo->ttm;
    1549             :         struct vm_page *page;
    1550             :         bus_addr_t addr;
    1551             :         paddr_t paddr;
    1552             :         int i;
    1553             : 
    1554           0 :         if (drm_vma_node_has_offset(&bo->vma_node)) {
    1555           0 :                 if (bo->mem.bus.is_iomem) {
    1556           0 :                         for (i = 0; i < bo->mem.num_pages; ++i) {
    1557           0 :                                 addr = bo->mem.bus.base + bo->mem.bus.offset;
    1558           0 :                                 paddr = bus_space_mmap(bo->bdev->memt, addr,
    1559             :                                                        i << PAGE_SHIFT, 0, 0);
    1560           0 :                                 page = PHYS_TO_VM_PAGE(paddr);
    1561           0 :                                 if (unlikely(page == NULL))
    1562             :                                         continue;
    1563           0 :                                 pmap_page_protect(page, PROT_NONE);
    1564           0 :                         }
    1565           0 :                 } else if (ttm) {
    1566           0 :                         for (i = 0; i < ttm->num_pages; ++i) {
    1567           0 :                                 page = ttm->pages[i];
    1568           0 :                                 if (unlikely(page == NULL))
    1569             :                                         continue;
    1570           0 :                                 pmap_page_protect(page, PROT_NONE);
    1571           0 :                         }
    1572             :                 }
    1573             :         }
    1574           0 :         ttm_mem_io_free_vm(bo);
    1575           0 : }
    1576             : #endif
    1577             : 
    1578           0 : void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
    1579             : {
    1580           0 :         struct ttm_bo_device *bdev = bo->bdev;
    1581           0 :         struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
    1582             : 
    1583           0 :         ttm_mem_io_lock(man, false);
    1584           0 :         ttm_bo_unmap_virtual_locked(bo);
    1585           0 :         ttm_mem_io_unlock(man);
    1586           0 : }
     1587             : 
    1589             : EXPORT_SYMBOL(ttm_bo_unmap_virtual);
    1590             : 
    1591           0 : int ttm_bo_wait(struct ttm_buffer_object *bo,
    1592             :                 bool lazy, bool interruptible, bool no_wait)
    1593             : {
    1594             :         struct reservation_object_list *fobj;
    1595             :         struct reservation_object *resv;
    1596             :         struct fence *excl;
    1597           0 :         long timeout = 15 * HZ;
    1598             :         int i;
    1599             : 
    1600           0 :         resv = bo->resv;
    1601           0 :         fobj = reservation_object_get_list(resv);
    1602           0 :         excl = reservation_object_get_excl(resv);
    1603           0 :         if (excl) {
    1604           0 :                 if (!fence_is_signaled(excl)) {
    1605           0 :                         if (no_wait)
    1606           0 :                                 return -EBUSY;
    1607             : 
    1608           0 :                         timeout = fence_wait_timeout(excl,
    1609             :                                                      interruptible, timeout);
    1610           0 :                 }
    1611             :         }
    1612             : 
    1613           0 :         for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
    1614             :                 struct fence *fence;
    1615           0 :                 fence = rcu_dereference_protected(fobj->shared[i],
    1616             :                                                 reservation_object_held(resv));
    1617             : 
    1618           0 :                 if (!fence_is_signaled(fence)) {
    1619           0 :                         if (no_wait)
    1620           0 :                                 return -EBUSY;
    1621             : 
    1622           0 :                         timeout = fence_wait_timeout(fence,
    1623             :                                                      interruptible, timeout);
    1624           0 :                 }
    1625           0 :         }
    1626             : 
    1627           0 :         if (timeout < 0)
    1628           0 :                 return timeout;
    1629             : 
    1630           0 :         if (timeout == 0)
    1631           0 :                 return -EBUSY;
    1632             : 
    1633           0 :         reservation_object_add_excl_fence(resv, NULL);
    1634           0 :         clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
    1635           0 :         return 0;
    1636           0 : }
    1637             : EXPORT_SYMBOL(ttm_bo_wait);
    1638             : 
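/*
 * A minimal sketch (hypothetical helper): with no_wait set,
 * ttm_bo_wait() becomes a non-blocking idle probe, as used by
 * ttm_bo_synccpu_write_grab() below. The caller must hold the
 * reservation so bo->resv is stable.
 */
static bool example_bo_is_idle(struct ttm_buffer_object *bo)
{
        return ttm_bo_wait(bo, false, false, true) == 0;
}
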
    1639           0 : int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
    1640             : {
    1641             :         int ret = 0;
    1642             : 
    1643             :         /*
    1644             :          * Using ttm_bo_reserve makes sure the lru lists are updated.
    1645             :          */
    1646             : 
    1647           0 :         ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
    1648           0 :         if (unlikely(ret != 0))
    1649           0 :                 return ret;
    1650           0 :         ret = ttm_bo_wait(bo, false, true, no_wait);
    1651           0 :         if (likely(ret == 0))
    1652           0 :                 atomic_inc(&bo->cpu_writers);
    1653           0 :         ttm_bo_unreserve(bo);
    1654           0 :         return ret;
    1655           0 : }
    1656             : EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
    1657             : 
    1658           0 : void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
    1659             : {
    1660           0 :         atomic_dec(&bo->cpu_writers);
    1661           0 : }
    1662             : EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
    1663             : 
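/*
 * A minimal sketch (hypothetical helper) of bracketing a CPU write
 * with the pair above; the elevated cpu_writers count lets drivers
 * detect buffers with an outstanding CPU writer.
 */
static int example_cpu_write(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = ttm_bo_synccpu_write_grab(bo, no_wait);

        if (unlikely(ret != 0))
                return ret;     /* -EBUSY when no_wait and not idle */

        /* ... write through a kernel mapping of the buffer ... */

        ttm_bo_synccpu_write_release(bo);
        return 0;
}
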
    1664             : /**
    1665             :  * A buffer object shrink method that tries to swap out the first
     1666             :  * buffer object on the ttm_bo_global::swap_lru list.
    1667             :  */
    1668             : 
    1669           0 : static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
    1670             : {
    1671             :         struct ttm_bo_global *glob =
    1672           0 :             container_of(shrink, struct ttm_bo_global, shrink);
    1673             :         struct ttm_buffer_object *bo;
    1674             :         int ret = -EBUSY;
    1675             :         int put_count;
    1676             : 
    1677           0 :         spin_lock(&glob->lru_lock);
    1678           0 :         list_for_each_entry(bo, &glob->swap_lru, swap) {
    1679           0 :                 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
    1680           0 :                 if (!ret)
    1681             :                         break;
    1682             :         }
    1683             : 
    1684           0 :         if (ret) {
    1685           0 :                 spin_unlock(&glob->lru_lock);
    1686           0 :                 return ret;
    1687             :         }
    1688             : 
    1689           0 :         kref_get(&bo->list_kref);
    1690             : 
    1691           0 :         if (!list_empty(&bo->ddestroy)) {
    1692           0 :                 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
    1693           0 :                 kref_put(&bo->list_kref, ttm_bo_release_list);
    1694           0 :                 return ret;
    1695             :         }
    1696             : 
    1697           0 :         put_count = ttm_bo_del_from_lru(bo);
    1698           0 :         spin_unlock(&glob->lru_lock);
    1699             : 
    1700           0 :         ttm_bo_list_ref_sub(bo, put_count, true);
    1701             : 
     1702             :         /*
    1703             :          * Wait for GPU, then move to system cached.
    1704             :          */
    1705             : 
    1706           0 :         ret = ttm_bo_wait(bo, false, false, false);
    1707             : 
    1708           0 :         if (unlikely(ret != 0))
    1709             :                 goto out;
    1710             : 
    1711           0 :         if (bo->mem.mem_type != TTM_PL_SYSTEM ||
    1712           0 :             bo->ttm->caching_state != tt_cached) {
    1713           0 :                 struct ttm_mem_reg evict_mem;
    1714             : 
    1715           0 :                 evict_mem = bo->mem;
    1716           0 :                 evict_mem.mm_node = NULL;
    1717           0 :                 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
    1718           0 :                 evict_mem.mem_type = TTM_PL_SYSTEM;
    1719             : 
    1720           0 :                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
    1721             :                                              false, false);
    1722           0 :                 if (unlikely(ret != 0))
    1723           0 :                         goto out;
    1724           0 :         }
    1725             : 
    1726           0 :         ttm_bo_unmap_virtual(bo);
    1727             : 
     1728             :         /*
    1729             :          * Swap out. Buffer will be swapped in again as soon as
    1730             :          * anyone tries to access a ttm page.
    1731             :          */
    1732             : 
    1733           0 :         if (bo->bdev->driver->swap_notify)
    1734           0 :                 bo->bdev->driver->swap_notify(bo);
    1735             : 
    1736           0 :         ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
    1737             : out:
    1738             : 
     1739             :         /*
     1740             :          * Unreserve without putting the buffer back on the LRU
     1741             :          * (hence __ttm_bo_unreserve) to avoid swapping out an
     1742             :          * already swapped buffer.
     1743             :          */
    1744             : 
    1745           0 :         __ttm_bo_unreserve(bo);
    1746           0 :         kref_put(&bo->list_kref, ttm_bo_release_list);
    1747           0 :         return ret;
    1748           0 : }
    1749             : 
    1750           0 : void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
    1751             : {
    1752           0 :         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
    1753             :                 ;
    1754           0 : }
    1755             : EXPORT_SYMBOL(ttm_bo_swapout_all);
    1756             : 
    1757             : /**
    1758             :  * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
    1759             :  * unreserved
    1760             :  *
    1761             :  * @bo: Pointer to buffer
    1762             :  */
    1763           0 : int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
    1764             : {
    1765             :         int ret;
    1766             : 
    1767             :         /*
     1768             :          * In the absence of a wait_unlocked API,
     1769             :          * use the bo::wu_mutex to avoid triggering livelocks due to
    1770             :          * concurrent use of this function. Note that this use of
    1771             :          * bo::wu_mutex can go away if we change locking order to
    1772             :          * mmap_sem -> bo::reserve.
    1773             :          */
    1774           0 :         ret = mutex_lock_interruptible(&bo->wu_mutex);
    1775           0 :         if (unlikely(ret != 0))
    1776           0 :                 return -ERESTARTSYS;
    1777           0 :         if (!ww_mutex_is_locked(&bo->resv->lock))
    1778             :                 goto out_unlock;
    1779           0 :         ret = __ttm_bo_reserve(bo, true, false, false, NULL);
    1780           0 :         if (unlikely(ret != 0))
    1781             :                 goto out_unlock;
    1782           0 :         __ttm_bo_unreserve(bo);
    1783             : 
    1784             : out_unlock:
    1785           0 :         mutex_unlock(&bo->wu_mutex);
    1786           0 :         return ret;
    1787           0 : }

Generated by: LCOV version 1.13