LCOV code coverage report — dev/pci/drm/ttm/ttm_tt.c (test: 6.4, 2018-10-19): 0 of 180 lines and 0 of 16 functions hit (0.0 % coverage).

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_cache.h>
#include <dev/pci/drm/drm_mem_util.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}

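/**
 * Allocates one contiguous block sized to hold the page pointer, CPU
 * address and DMA address arrays for a DMA-aware ttm, then carves the
 * cpu_address and dma_address arrays out of the tail of that block.
 */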
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
                                          sizeof(*ttm->ttm.pages) +
                                          sizeof(*ttm->dma_address) +
                                          sizeof(*ttm->cpu_address));
        ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct vm_page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct vm_page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct vm_page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

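/**
 * Maps TTM placement flags onto a caching state (TTM_PL_FLAG_WC ->
 * write-combined, TTM_PL_FLAG_UNCACHED -> uncached, otherwise cached)
 * and applies it via ttm_tt_set_caching().
 */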
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

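/**
 * Tears down a ttm: unbinds and unpopulates it as needed, detaches any
 * non-persistent swap storage, then calls the backend destroy hook.
 */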
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (unlikely(ttm == NULL))
                return;

        if (ttm->state == tt_bound) {
                ttm_tt_unbind(ttm);
        }

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                uao_detach(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

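/**
 * Initializes a driver-embedded struct ttm_tt for a buffer of @size bytes
 * and allocates its page directory.  Returns 0 on success or -ENOMEM if the
 * page directory cannot be allocated, in which case ttm_tt_destroy() has
 * already been called on the ttm.
 *
 * Illustrative sketch only, not part of this file: a backend driver would
 * typically embed the struct ttm_tt, set its func table, and call
 * ttm_tt_init() from its ttm_tt_create hook; on failure the destroy hook
 * has already run, so the hook just returns NULL.  The names
 * "struct my_ttm" and "my_ttm_backend_func" are hypothetical.
 *
 *        struct my_ttm *tt = malloc(sizeof(*tt), M_DRM, M_WAITOK | M_ZERO);
 *
 *        tt->ttm.func = &my_ttm_backend_func;
 *        if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page))
 *                return NULL;
 *        return &tt->ttm;
 */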
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct vm_page *dummy_read_page)
{
        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        ttm_tt_alloc_page_directory(ttm);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

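/**
 * Releases the page directory allocated by ttm_tt_init().
 */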
void ttm_tt_fini(struct ttm_tt *ttm)
{
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

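/**
 * Like ttm_tt_init(), but for a struct ttm_dma_tt: additionally initializes
 * the pages_list and allocates the combined page/CPU-address/DMA-address
 * directory.
 */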
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags,
                struct vm_page *dummy_read_page)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm->bdev = bdev;
        ttm->glob = bdev->glob;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

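/**
 * Releases the combined directory allocated by ttm_dma_tt_init() and clears
 * the cpu_address and dma_address pointers.
 */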
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        drm_free_large(ttm->pages);
        ttm->pages = NULL;
        ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

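/**
 * Unbinds a currently bound ttm via the backend unbind hook and marks it
 * tt_unbound.  No-op if the ttm is not bound.
 */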
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

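/**
 * Populates the ttm through the driver's ttm_tt_populate hook if needed and
 * binds it to @bo_mem via the backend bind hook, marking it tt_bound.
 * Returns 0 on success or a negative error code.
 */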
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

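/**
 * Copies the ttm's contents back in from its swap storage (a uvm aobj):
 * wires the object's pages, copies them into ttm->pages, then unwires,
 * detaches the storage unless it is persistent, and clears
 * TTM_PAGE_FLAG_SWAPPED.
 */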
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct uvm_object *swap_storage;
        struct vm_page *from_page;
        struct vm_page *to_page;
        struct pglist plist;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        TAILQ_INIT(&plist);
        if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
                goto out_err;

        from_page = TAILQ_FIRST(&plist);
        for (i = 0; i < ttm->num_pages; ++i) {
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                uvm_pagecopy(from_page, to_page);
                from_page = TAILQ_NEXT(from_page, pageq);
        }

        uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                uao_detach(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        return ret;
}

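/**
 * Copies the ttm's pages out to swap storage: uses @persistent_swap_storage
 * if given, otherwise creates a uvm aobj; copies the pages into it, asks the
 * driver to unpopulate the ttm, and marks it TTM_PAGE_FLAG_SWAPPED.
 */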
int ttm_tt_swapout(struct ttm_tt *ttm, struct uvm_object *persistent_swap_storage)
{
        struct uvm_object *swap_storage;
        struct vm_page *from_page;
        struct vm_page *to_page;
        struct pglist plist;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = uao_create(ttm->num_pages << PAGE_SHIFT, 0);
#ifdef notyet
                if (unlikely(IS_ERR(swap_storage))) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
#endif
        } else
                swap_storage = persistent_swap_storage;

        TAILQ_INIT(&plist);
        if (uvm_objwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT, &plist))
                goto out_err;

        to_page = TAILQ_FIRST(&plist);
        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                uvm_pagecopy(from_page, to_page);
#ifdef notyet
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
#endif
                to_page = TAILQ_NEXT(to_page, pageq);
        }

        uvm_objunwire(swap_storage, 0, ttm->num_pages << PAGE_SHIFT);

        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;
out_err:
        if (!persistent_swap_storage)
                uao_detach(swap_storage);

        return ret;
}

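/**
 * Removes all virtual mappings of the ttm's pages (pmap_page_protect with
 * PROT_NONE) before they are unpopulated; skipped for SG-backed ttms.
 */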
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        int i;
        struct vm_page *page;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (unlikely(page == NULL))
                        continue;
                pmap_page_protect(page, PROT_NONE);
        }
}

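/**
 * Releases the pages backing the ttm: clears their mappings and calls the
 * driver's ttm_tt_unpopulate hook.  No-op if the ttm is already
 * unpopulated.
 */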
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
