LCOV - code coverage report
Current view: top level - dev/pci/drm/radeon - radeon_test.c (source / functions)
Test:     6.4
Date:     2018-10-19 03:25:38
Coverage: Lines:     0 / 321  (0.0 %)
          Functions: 0 / 7    (0.0 %)
Legend:   Lines: hit | not hit

          Line data    Source code
       1             : /*
       2             :  * Copyright 2009 VMware, Inc.
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice shall be included in
      12             :  * all copies or substantial portions of the Software.
      13             :  *
      14             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      15             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      16             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      17             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      18             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      19             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      20             :  * OTHER DEALINGS IN THE SOFTWARE.
      21             :  *
      22             :  * Authors: Michel Dänzer
      23             :  */
      24             : #include <dev/pci/drm/drmP.h>
      25             : #include <dev/pci/drm/radeon_drm.h>
      26             : #include "radeon_reg.h"
      27             : #include "radeon.h"
      28             : 
      29             : #define RADEON_TEST_COPY_BLIT 1
      30             : #define RADEON_TEST_COPY_DMA  0
      31             : 
      32             : 
      33             : /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
      34           0 : static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
      35             : {
      36           0 :         struct radeon_bo *vram_obj = NULL;
      37             :         struct radeon_bo **gtt_obj = NULL;
      38           0 :         uint64_t gtt_addr, vram_addr;
      39             :         unsigned n, size;
      40             :         int i, r, ring;
      41             : 
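                     :         /* Select the ring that drives the requested copy engine. */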
      42           0 :         switch (flag) {
      43             :         case RADEON_TEST_COPY_DMA:
      44           0 :                 ring = radeon_copy_dma_ring_index(rdev);
      45           0 :                 break;
      46             :         case RADEON_TEST_COPY_BLIT:
      47           0 :                 ring = radeon_copy_blit_ring_index(rdev);
      48           0 :                 break;
      49             :         default:
      50           0 :                 DRM_ERROR("Unknown copy method\n");
      51           0 :                 return;
      52             :         }
      53             : 
      54             :         size = 1024 * 1024;
      55             : 
      56             :         /* Number of tests =
      57             :          * (Total GTT - IB pool - writeback page - ring buffers) / test size
      58             :          */
      59           0 :         n = rdev->mc.gtt_size - rdev->gart_pin_size;
      60           0 :         n /= size;
      61             : 
      62           0 :         gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
      63           0 :         if (!gtt_obj) {
      64           0 :                 DRM_ERROR("Failed to allocate %d pointers\n", n);
      65             :                 r = 1;
      66           0 :                 goto out_cleanup;
      67             :         }
      68             : 
      69           0 :         r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
      70             :                              0, NULL, NULL, &vram_obj);
      71           0 :         if (r) {
      72           0 :                 DRM_ERROR("Failed to create VRAM object\n");
      73           0 :                 goto out_cleanup;
      74             :         }
      75           0 :         r = radeon_bo_reserve(vram_obj, false);
      76           0 :         if (unlikely(r != 0))
      77             :                 goto out_unref;
      78           0 :         r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
      79           0 :         if (r) {
      80           0 :                 DRM_ERROR("Failed to pin VRAM object\n");
      81           0 :                 goto out_unres;
      82             :         }
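                     :         /* Create one 1 MiB GTT buffer per free 1 MiB of GTT and keep each one
                     :          * pinned, so successive buffers land at new GTT offsets and the test
                     :          * covers the whole aperture.  Each buffer is filled with a pattern,
                     :          * copied to VRAM, verified, copied back and verified again. */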
      83           0 :         for (i = 0; i < n; i++) {
      84           0 :                 void *gtt_map, *vram_map;
      85             :                 void **gtt_start, **gtt_end;
      86             :                 void **vram_start, **vram_end;
      87           0 :                 struct radeon_fence *fence = NULL;
      88             : 
      89           0 :                 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
      90             :                                      RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
      91           0 :                                      gtt_obj + i);
      92           0 :                 if (r) {
      93           0 :                         DRM_ERROR("Failed to create GTT object %d\n", i);
      94           0 :                         goto out_lclean;
      95             :                 }
      96             : 
      97           0 :                 r = radeon_bo_reserve(gtt_obj[i], false);
      98           0 :                 if (unlikely(r != 0))
      99             :                         goto out_lclean_unref;
     100           0 :                 r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
     101           0 :                 if (r) {
     102           0 :                         DRM_ERROR("Failed to pin GTT object %d\n", i);
     103           0 :                         goto out_lclean_unres;
     104             :                 }
     105             : 
     106           0 :                 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
     107           0 :                 if (r) {
     108           0 :                         DRM_ERROR("Failed to map GTT object %d\n", i);
     109           0 :                         goto out_lclean_unpin;
     110             :                 }
     111             : 
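                     :                 /* Fill the GTT buffer with each slot's own CPU address:
                     :                  * a unique, position-dependent pattern to verify against. */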
     112           0 :                 for (gtt_start = gtt_map, gtt_end = gtt_map + size;
     113           0 :                      gtt_start < gtt_end;
     114           0 :                      gtt_start++)
     115           0 :                         *gtt_start = gtt_start;
     116             : 
     117           0 :                 radeon_bo_kunmap(gtt_obj[i]);
     118             : 
     119           0 :                 if (ring == R600_RING_TYPE_DMA_INDEX)
     120           0 :                         fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
     121             :                                                 size / RADEON_GPU_PAGE_SIZE,
     122             :                                                 vram_obj->tbo.resv);
     123             :                 else
     124           0 :                         fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
     125             :                                                  size / RADEON_GPU_PAGE_SIZE,
     126             :                                                  vram_obj->tbo.resv);
     127           0 :                 if (IS_ERR(fence)) {
     128           0 :                         DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
     129           0 :                         r = PTR_ERR(fence);
     130           0 :                         goto out_lclean_unpin;
     131             :                 }
     132             : 
     133           0 :                 r = radeon_fence_wait(fence, false);
     134           0 :                 if (r) {
     135           0 :                         DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
     136           0 :                         goto out_lclean_unpin;
     137             :                 }
     138             : 
     139           0 :                 radeon_fence_unref(&fence);
     140             : 
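                     :                 /* Check that every VRAM slot received the GTT address written
                     :                  * above, then overwrite it with its own VRAM address so the
                     :                  * copy back can be verified as well. */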
     141           0 :                 r = radeon_bo_kmap(vram_obj, &vram_map);
     142           0 :                 if (r) {
     143           0 :                         DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
     144           0 :                         goto out_lclean_unpin;
     145             :                 }
     146             : 
     147           0 :                 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
     148           0 :                      vram_start = vram_map, vram_end = vram_map + size;
     149           0 :                      vram_start < vram_end;
     150           0 :                      gtt_start++, vram_start++) {
     151           0 :                         if (*vram_start != gtt_start) {
     152           0 :                                 DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
     153             :                                           "expected 0x%p (GTT/VRAM offset "
     154             :                                           "0x%16llx/0x%16llx)\n",
     155             :                                           i, *vram_start, gtt_start,
     156             :                                           (unsigned long long)
     157             :                                           (gtt_addr - rdev->mc.gtt_start +
     158             :                                            (void*)gtt_start - gtt_map),
     159             :                                           (unsigned long long)
     160             :                                           (vram_addr - rdev->mc.vram_start +
     161             :                                            (void*)gtt_start - gtt_map));
     162           0 :                                 radeon_bo_kunmap(vram_obj);
     163           0 :                                 goto out_lclean_unpin;
     164             :                         }
     165           0 :                         *vram_start = vram_start;
     166             :                 }
     167             : 
     168           0 :                 radeon_bo_kunmap(vram_obj);
     169             : 
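                     :                 /* Copy the rewritten VRAM contents back into the same GTT buffer. */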
     170           0 :                 if (ring == R600_RING_TYPE_DMA_INDEX)
     171           0 :                         fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
     172             :                                                 size / RADEON_GPU_PAGE_SIZE,
     173             :                                                 vram_obj->tbo.resv);
     174             :                 else
     175           0 :                         fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
     176             :                                                  size / RADEON_GPU_PAGE_SIZE,
     177             :                                                  vram_obj->tbo.resv);
     178           0 :                 if (IS_ERR(fence)) {
     179           0 :                         DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
     180           0 :                         r = PTR_ERR(fence);
     181           0 :                         goto out_lclean_unpin;
     182             :                 }
     183             : 
     184           0 :                 r = radeon_fence_wait(fence, false);
     185           0 :                 if (r) {
     186           0 :                         DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
     187           0 :                         goto out_lclean_unpin;
     188             :                 }
     189             : 
     190           0 :                 radeon_fence_unref(&fence);
     191             : 
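                     :                 /* The GTT buffer must now hold the VRAM addresses written
                     :                  * during the first verification pass. */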
     192           0 :                 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
     193           0 :                 if (r) {
     194           0 :                         DRM_ERROR("Failed to map GTT object after copy %d\n", i);
     195           0 :                         goto out_lclean_unpin;
     196             :                 }
     197             : 
     198           0 :                 for (gtt_start = gtt_map, gtt_end = gtt_map + size,
     199           0 :                      vram_start = vram_map, vram_end = vram_map + size;
     200           0 :                      gtt_start < gtt_end;
     201           0 :                      gtt_start++, vram_start++) {
     202           0 :                         if (*gtt_start != vram_start) {
     203           0 :                                 DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
     204             :                                           "expected 0x%p (VRAM/GTT offset "
     205             :                                           "0x%16llx/0x%16llx)\n",
     206             :                                           i, *gtt_start, vram_start,
     207             :                                           (unsigned long long)
     208             :                                           (vram_addr - rdev->mc.vram_start +
     209             :                                            (void*)vram_start - vram_map),
     210             :                                           (unsigned long long)
     211             :                                           (gtt_addr - rdev->mc.gtt_start +
     212             :                                            (void*)vram_start - vram_map));
     213           0 :                                 radeon_bo_kunmap(gtt_obj[i]);
     214           0 :                                 goto out_lclean_unpin;
     215             :                         }
     216             :                 }
     217             : 
     218           0 :                 radeon_bo_kunmap(gtt_obj[i]);
     219             : 
     220             :                 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
     221             :                          gtt_addr - rdev->mc.gtt_start);
     222           0 :                 continue;
     223             : 
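                     : /* Per-iteration error unwind; the entry label depends on how far setup got. */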
     224             : out_lclean_unpin:
     225           0 :                 radeon_bo_unpin(gtt_obj[i]);
     226             : out_lclean_unres:
     227           0 :                 radeon_bo_unreserve(gtt_obj[i]);
     228             : out_lclean_unref:
     229           0 :                 radeon_bo_unref(&gtt_obj[i]);
     230             : out_lclean:
     231           0 :                 for (--i; i >= 0; --i) {
     232           0 :                         radeon_bo_unpin(gtt_obj[i]);
     233           0 :                         radeon_bo_unreserve(gtt_obj[i]);
     234           0 :                         radeon_bo_unref(&gtt_obj[i]);
     235             :                 }
     236           0 :                 if (fence && !IS_ERR(fence))
     237           0 :                         radeon_fence_unref(&fence);
     238           0 :                 break;
     239           0 :         }
     240             : 
     241           0 :         radeon_bo_unpin(vram_obj);
     242             : out_unres:
     243           0 :         radeon_bo_unreserve(vram_obj);
     244             : out_unref:
     245           0 :         radeon_bo_unref(&vram_obj);
     246             : out_cleanup:
     247           0 :         kfree(gtt_obj);
     248           0 :         if (r) {
     249           0 :                 printk(KERN_WARNING "Error while testing BO move.\n");
     250           0 :         }
     251           0 : }
     252             : 
     253           0 : void radeon_test_moves(struct radeon_device *rdev)
     254             : {
     255           0 :         if (rdev->asic->copy.dma)
     256           0 :                 radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
     257           0 :         if (rdev->asic->copy.blit)
     258           0 :                 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
     259           0 : }
     260             : 
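                     : /* Emit a test fence on the given ring.  UVD and VCE rings take their fence
                     :  * from a dummy create/destroy message pair instead of a plain fence emit. */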
     261           0 : static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
     262             :                                              struct radeon_ring *ring,
     263             :                                              struct radeon_fence **fence)
     264             : {
     265           0 :         uint32_t handle = ring->idx ^ 0xdeafbeef;
     266             :         int r;
     267             : 
     268           0 :         if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
     269           0 :                 r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
     270           0 :                 if (r) {
     271           0 :                         DRM_ERROR("Failed to get dummy create msg\n");
     272           0 :                         return r;
     273             :                 }
     274             : 
     275           0 :                 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
     276           0 :                 if (r) {
     277           0 :                         DRM_ERROR("Failed to get dummy destroy msg\n");
     278           0 :                         return r;
     279             :                 }
     280             : 
     281           0 :         } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
     282           0 :                    ring->idx == TN_RING_TYPE_VCE2_INDEX) {
     283           0 :                 r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
     284           0 :                 if (r) {
     285           0 :                         DRM_ERROR("Failed to get dummy create msg\n");
     286           0 :                         return r;
     287             :                 }
     288             : 
     289           0 :                 r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
     290           0 :                 if (r) {
     291           0 :                         DRM_ERROR("Failed to get dummy destroy msg\n");
     292           0 :                         return r;
     293             :                 }
     294             : 
     295             :         } else {
     296           0 :                 r = radeon_ring_lock(rdev, ring, 64);
     297           0 :                 if (r) {
      298           0 :                         DRM_ERROR("Failed to lock ring %d\n", ring->idx);
     299           0 :                         return r;
     300             :                 }
     301           0 :                 radeon_fence_emit(rdev, fence, ring->idx);
     302           0 :                 radeon_ring_unlock_commit(rdev, ring, false);
     303             :         }
     304           0 :         return 0;
     305           0 : }
     306             : 
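                     : /* Two-ring semaphore test: ring A waits on a semaphore twice, emitting a
                     :  * fence after each wait, while ring B signals it twice.  Each fence may
                     :  * only signal after the corresponding signal from ring B. */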
     307           0 : void radeon_test_ring_sync(struct radeon_device *rdev,
     308             :                            struct radeon_ring *ringA,
     309             :                            struct radeon_ring *ringB)
     310             : {
     311           0 :         struct radeon_fence *fence1 = NULL, *fence2 = NULL;
     312           0 :         struct radeon_semaphore *semaphore = NULL;
     313             :         int r;
     314             : 
     315           0 :         r = radeon_semaphore_create(rdev, &semaphore);
     316           0 :         if (r) {
     317           0 :                 DRM_ERROR("Failed to create semaphore\n");
     318           0 :                 goto out_cleanup;
     319             :         }
     320             : 
     321           0 :         r = radeon_ring_lock(rdev, ringA, 64);
     322           0 :         if (r) {
     323           0 :                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
     324           0 :                 goto out_cleanup;
     325             :         }
     326           0 :         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
     327           0 :         radeon_ring_unlock_commit(rdev, ringA, false);
     328             : 
     329           0 :         r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
     330           0 :         if (r)
     331             :                 goto out_cleanup;
     332             : 
     333           0 :         r = radeon_ring_lock(rdev, ringA, 64);
     334           0 :         if (r) {
     335           0 :                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
     336           0 :                 goto out_cleanup;
     337             :         }
     338           0 :         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
     339           0 :         radeon_ring_unlock_commit(rdev, ringA, false);
     340             : 
     341           0 :         r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
     342           0 :         if (r)
     343             :                 goto out_cleanup;
     344             : 
     345           0 :         mdelay(1000);
     346             : 
     347           0 :         if (radeon_fence_signaled(fence1)) {
     348           0 :                 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
     349           0 :                 goto out_cleanup;
     350             :         }
     351             : 
     352           0 :         r = radeon_ring_lock(rdev, ringB, 64);
     353           0 :         if (r) {
     354           0 :                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
     355           0 :                 goto out_cleanup;
     356             :         }
     357           0 :         radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
     358           0 :         radeon_ring_unlock_commit(rdev, ringB, false);
     359             : 
     360           0 :         r = radeon_fence_wait(fence1, false);
     361           0 :         if (r) {
     362           0 :                 DRM_ERROR("Failed to wait for sync fence 1\n");
     363           0 :                 goto out_cleanup;
     364             :         }
     365             : 
     366           0 :         mdelay(1000);
     367             : 
     368           0 :         if (radeon_fence_signaled(fence2)) {
     369           0 :                 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
     370           0 :                 goto out_cleanup;
     371             :         }
     372             : 
     373           0 :         r = radeon_ring_lock(rdev, ringB, 64);
     374           0 :         if (r) {
     375           0 :                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
     376           0 :                 goto out_cleanup;
     377             :         }
     378           0 :         radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
     379           0 :         radeon_ring_unlock_commit(rdev, ringB, false);
     380             : 
     381           0 :         r = radeon_fence_wait(fence2, false);
     382           0 :         if (r) {
      383           0 :                 DRM_ERROR("Failed to wait for sync fence 2\n");
     384           0 :                 goto out_cleanup;
     385             :         }
     386             : 
     387             : out_cleanup:
     388           0 :         radeon_semaphore_free(rdev, &semaphore, NULL);
     389             : 
     390           0 :         if (fence1)
     391           0 :                 radeon_fence_unref(&fence1);
     392             : 
     393           0 :         if (fence2)
     394           0 :                 radeon_fence_unref(&fence2);
     395             : 
     396           0 :         if (r)
     397           0 :                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
     398           0 : }
     399             : 
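                     : /* Three-ring semaphore test: rings A and B both wait on the same semaphore
                     :  * while ring C signals it twice.  Exactly one of the two fences should have
                     :  * fired after the first signal, and both after the second. */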
     400           0 : static void radeon_test_ring_sync2(struct radeon_device *rdev,
     401             :                             struct radeon_ring *ringA,
     402             :                             struct radeon_ring *ringB,
     403             :                             struct radeon_ring *ringC)
     404             : {
     405           0 :         struct radeon_fence *fenceA = NULL, *fenceB = NULL;
     406           0 :         struct radeon_semaphore *semaphore = NULL;
     407             :         bool sigA, sigB;
     408             :         int i, r;
     409             : 
     410           0 :         r = radeon_semaphore_create(rdev, &semaphore);
     411           0 :         if (r) {
     412           0 :                 DRM_ERROR("Failed to create semaphore\n");
     413           0 :                 goto out_cleanup;
     414             :         }
     415             : 
     416           0 :         r = radeon_ring_lock(rdev, ringA, 64);
     417           0 :         if (r) {
     418           0 :                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
     419           0 :                 goto out_cleanup;
     420             :         }
     421           0 :         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
     422           0 :         radeon_ring_unlock_commit(rdev, ringA, false);
     423             : 
     424           0 :         r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
     425           0 :         if (r)
     426             :                 goto out_cleanup;
     427             : 
     428           0 :         r = radeon_ring_lock(rdev, ringB, 64);
     429           0 :         if (r) {
     430           0 :                 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
     431           0 :                 goto out_cleanup;
     432             :         }
     433           0 :         radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
     434           0 :         radeon_ring_unlock_commit(rdev, ringB, false);
     435           0 :         r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
     436           0 :         if (r)
     437             :                 goto out_cleanup;
     438             : 
     439           0 :         mdelay(1000);
     440             : 
     441           0 :         if (radeon_fence_signaled(fenceA)) {
     442           0 :                 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
     443           0 :                 goto out_cleanup;
     444             :         }
     445           0 :         if (radeon_fence_signaled(fenceB)) {
     446           0 :                 DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
     447           0 :                 goto out_cleanup;
     448             :         }
     449             : 
     450           0 :         r = radeon_ring_lock(rdev, ringC, 64);
     451           0 :         if (r) {
      452           0 :                 DRM_ERROR("Failed to lock ring C %p\n", ringC);
     453           0 :                 goto out_cleanup;
     454             :         }
     455           0 :         radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
     456           0 :         radeon_ring_unlock_commit(rdev, ringC, false);
     457             : 
     458           0 :         for (i = 0; i < 30; ++i) {
     459           0 :                 mdelay(100);
     460           0 :                 sigA = radeon_fence_signaled(fenceA);
     461           0 :                 sigB = radeon_fence_signaled(fenceB);
     462           0 :                 if (sigA || sigB)
     463             :                         break;
     464             :         }
     465             : 
     466           0 :         if (!sigA && !sigB) {
     467           0 :                 DRM_ERROR("Neither fence A nor B has been signaled\n");
     468           0 :                 goto out_cleanup;
     469           0 :         } else if (sigA && sigB) {
      470           0 :                 DRM_ERROR("Both fences A and B have been signaled\n");
     471           0 :                 goto out_cleanup;
     472             :         }
     473             : 
      474             :         DRM_INFO("Fence %c was signaled first\n", sigA ? 'A' : 'B');
     475             : 
     476           0 :         r = radeon_ring_lock(rdev, ringC, 64);
     477           0 :         if (r) {
      478           0 :                 DRM_ERROR("Failed to lock ring C %p\n", ringC);
     479           0 :                 goto out_cleanup;
     480             :         }
     481           0 :         radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
     482           0 :         radeon_ring_unlock_commit(rdev, ringC, false);
     483             : 
     484           0 :         mdelay(1000);
     485             : 
     486           0 :         r = radeon_fence_wait(fenceA, false);
     487           0 :         if (r) {
     488           0 :                 DRM_ERROR("Failed to wait for sync fence A\n");
     489           0 :                 goto out_cleanup;
     490             :         }
     491           0 :         r = radeon_fence_wait(fenceB, false);
     492           0 :         if (r) {
     493           0 :                 DRM_ERROR("Failed to wait for sync fence B\n");
     494           0 :                 goto out_cleanup;
     495             :         }
     496             : 
     497             : out_cleanup:
     498           0 :         radeon_semaphore_free(rdev, &semaphore, NULL);
     499             : 
     500           0 :         if (fenceA)
     501           0 :                 radeon_fence_unref(&fenceA);
     502             : 
     503           0 :         if (fenceB)
     504           0 :                 radeon_fence_unref(&fenceB);
     505             : 
     506           0 :         if (r)
     507           0 :                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
     508           0 : }
     509             : 
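                     : /* Semaphore sync from the second VCE ring to the first is not possible;
                     :  * report that pairing as untestable. */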
     510           0 : static bool radeon_test_sync_possible(struct radeon_ring *ringA,
     511             :                                       struct radeon_ring *ringB)
     512             : {
     513           0 :         if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
     514           0 :             ringB->idx == TN_RING_TYPE_VCE1_INDEX)
     515           0 :                 return false;
     516             : 
     517           0 :         return true;
     518           0 : }
     519             : 
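                     : /* Run the sync tests for every ordered pair, and every triple, of rings
                     :  * that are ready, skipping combinations that cannot sync. */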
     520           0 : void radeon_test_syncing(struct radeon_device *rdev)
     521             : {
     522             :         int i, j, k;
     523             : 
     524           0 :         for (i = 1; i < RADEON_NUM_RINGS; ++i) {
     525           0 :                 struct radeon_ring *ringA = &rdev->ring[i];
     526           0 :                 if (!ringA->ready)
     527           0 :                         continue;
     528             : 
     529           0 :                 for (j = 0; j < i; ++j) {
     530           0 :                         struct radeon_ring *ringB = &rdev->ring[j];
     531           0 :                         if (!ringB->ready)
     532           0 :                                 continue;
     533             : 
     534           0 :                         if (!radeon_test_sync_possible(ringA, ringB))
     535           0 :                                 continue;
     536             : 
     537             :                         DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
     538           0 :                         radeon_test_ring_sync(rdev, ringA, ringB);
     539             : 
     540             :                         DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
     541           0 :                         radeon_test_ring_sync(rdev, ringB, ringA);
     542             : 
     543           0 :                         for (k = 0; k < j; ++k) {
     544           0 :                                 struct radeon_ring *ringC = &rdev->ring[k];
     545           0 :                                 if (!ringC->ready)
     546           0 :                                         continue;
     547             : 
     548           0 :                                 if (!radeon_test_sync_possible(ringA, ringC))
     549           0 :                                         continue;
     550             : 
     551           0 :                                 if (!radeon_test_sync_possible(ringB, ringC))
     552           0 :                                         continue;
     553             : 
     554             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
     555           0 :                                 radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
     556             : 
     557             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
     558           0 :                                 radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
     559             : 
     560             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
     561           0 :                                 radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
     562             : 
     563             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
     564           0 :                                 radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
     565             : 
     566             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
     567           0 :                                 radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
     568             : 
     569             :                                 DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
     570           0 :                                 radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
     571           0 :                         }
     572           0 :                 }
     573           0 :         }
     574           0 : }

Generated by: LCOV version 1.13