/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "nid.h"

u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
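
/*
 * Illustrative sketch, not part of this file: the smallest useful DMA
 * packet is a WRITE of embedded data, the same sequence
 * cayman_dma_ring_ib_execute() below emits for the rptr writeback.  The
 * helper name is hypothetical; DMA_PACKET(), DMA_PACKET_WRITE and
 * radeon_ring_write() are the macros/functions this file already uses.
 */
#if 0   /* example only */
static void example_dma_write_dword(struct radeon_ring *ring,
                                    uint64_t dst_gpu_addr, u32 value)
{
        /* header: WRITE opcode, 1 dword of payload */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
        radeon_ring_write(ring, dst_gpu_addr & 0xfffffffc);          /* dst lo */
        radeon_ring_write(ring, upper_32_bits(dst_gpu_addr) & 0xff); /* dst hi */
        radeon_ring_write(ring, value);                              /* payload */
}
#endif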

/**
 * cayman_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (cayman+).
 */
uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
                             struct radeon_ring *ring)
{
        u32 rptr, reg;

        if (rdev->wb.enabled) {
                rptr = rdev->wb.wb[ring->rptr_offs/4];
        } else {
                if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                        reg = DMA_RB_RPTR + DMA0_REGISTER_OFFSET;
                else
                        reg = DMA_RB_RPTR + DMA1_REGISTER_OFFSET;

                rptr = RREG32(reg);
        }

        return (rptr & 0x3fffc) >> 2;
}

/**
 * cayman_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (cayman+).
 */
uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
                             struct radeon_ring *ring)
{
        u32 reg;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
        else
                reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;

        return (RREG32(reg) & 0x3fffc) >> 2;
}

/**
 * cayman_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (cayman+).
 */
void cayman_dma_set_wptr(struct radeon_device *rdev,
                         struct radeon_ring *ring)
{
        u32 reg;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                reg = DMA_RB_WPTR + DMA0_REGISTER_OFFSET;
        else
                reg = DMA_RB_WPTR + DMA1_REGISTER_OFFSET;

        WREG32(reg, (ring->wptr << 2) & 0x3fffc);
}
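
/*
 * Illustrative note, not part of this file: ring->wptr and the values
 * returned above are dword indices, while the DMA_RB_RPTR/DMA_RB_WPTR
 * registers hold byte offsets, hence the << 2 on write and >> 2 on
 * read; 0x3fffc masks the offset to a dword-aligned value within the
 * register field.  Hypothetical numbers:
 */
#if 0   /* example only */
        u32 wptr_dw = 21;                         /* ring position in dwords */
        u32 reg_val = (wptr_dw << 2) & 0x3fffc;   /* 84, the byte offset written */
        u32 back_dw = (reg_val & 0x3fffc) >> 2;   /* 21 again on readback */
#endif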

/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
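
/*
 * Worked example, not part of this file: the INDIRECT_BUFFER packet is
 * 3 dwords, so padding until (wptr & 7) == 5 makes it end on an 8-dword
 * boundary, e.g. wptr == 18 takes 3 NOPs to reach 21 and the packet
 * then occupies dwords 21..23, ending at 24.  A closed form for the
 * same pad count:
 */
#if 0   /* example only */
        u32 pad = (5 - (ring->wptr & 7)) & 7;   /* NOPs needed before the IB packet */
#endif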

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
        u32 rb_cntl;

        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        /* dma0 */
        rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

        /* dma1 */
        rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
        rb_cntl &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

        rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
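
/*
 * Usage sketch, not part of this file and the call sites are an
 * assumption: suspend/reset paths stop the engines first, then
 * cayman_dma_resume() below reprograms and restarts them, matching the
 * accessible-VRAM shrink here with the grow at the end of resume.
 */
#if 0   /* example only */
        cayman_dma_stop(rdev);
        /* ... reset or power transition ... */
        r = cayman_dma_resume(rdev);
#endif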

/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = DMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = DMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(DMA_RB_RPTR + reg_offset, 0);
                WREG32(DMA_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
                WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

                WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

                /* enable DMA IBs */
                ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
                ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
                WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

                dma_cntl = RREG32(DMA_CNTL + reg_offset);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
                WREG32(DMA_CNTL + reg_offset, dma_cntl);

                ring->wptr = 0;
                WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

                WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        if ((rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX) ||
            (rdev->asic->copy.copy_ring_index == CAYMAN_RING_TYPE_DMA1_INDEX))
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}
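
/*
 * Worked example, not part of this file and the numbers are
 * hypothetical: for a 64KB ring, ring_size / 4 == 16384 dwords,
 * order_base_2(16384) == 14, so the size field programmed into
 * DMA_RB_CNTL is 14 << 1.  The ring base register takes the address in
 * 256-byte units, hence gpu_addr >> 8 above.
 */
#if 0   /* example only */
        u32 ring_size = 64 * 1024;                   /* bytes */
        u32 rb_bufsz  = order_base_2(ring_size / 4); /* 14 */
        u32 rb_cntl   = rb_bufsz << 1;               /* 28 */
#endif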

/**
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}
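
/*
 * Usage sketch, not part of this file and the caller shape is an
 * assumption: when the soft-reset status looks clean, the function
 * above refreshes the lockup bookkeeping and reports no hang; only when
 * the engine looks wedged does it compare actual ring progress.  A
 * timeout path might escalate like this:
 */
#if 0   /* example only */
        if (cayman_dma_is_lockup(rdev, ring))
                r = radeon_gpu_reset(rdev);     /* escalate to a full GPU reset */
#endif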

/**
 * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (cayman/TN).
 */
void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
                              struct radeon_ib *ib,
                              uint64_t pe, uint64_t src,
                              unsigned count)
{
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                                      0, 0, ndw);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

                pe += ndw * 4;
                src += ndw * 4;
                count -= ndw / 2;
        }
}
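
/*
 * Worked example, not part of this file: each PTE is 8 bytes (2 dwords),
 * so ndw = count * 2, and a single COPY packet moves at most 0xFFFFE
 * dwords, capping one iteration at 0x7FFFF PTEs.  Hypothetical count:
 */
#if 0   /* example only */
        unsigned count = 0x90000;       /* PTEs to copy */
        unsigned ndw   = count * 2;     /* 0x120000 dwords wanted */
        /* first chunk: clamped to 0xFFFFE dwords == 0x7FFFF PTEs,
         * second chunk: the remaining 0x10001 PTEs */
#endif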

/**
 * cayman_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Update PTEs by writing them manually using the DMA (cayman/TN).
 */
void cayman_dma_vm_write_pages(struct radeon_device *rdev,
                               struct radeon_ib *ib,
                               uint64_t pe,
                               uint64_t addr, unsigned count,
                               uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
                                                      0, 0, ndw);
                ib->ptr[ib->length_dw++] = pe;
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
                                value = 0;
                        }
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                }
        }
}
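
/*
 * Illustrative sketch, not part of this file: each emitted PTE is the
 * page address with the hardware access flags OR'd into its low bits.
 * R600_PTE_VALID is tested by the function above; that the usual
 * readable/writeable flags accompany it is an assumption here, and the
 * address is hypothetical.
 */
#if 0   /* example only */
        uint64_t page = 0x12345000ULL;  /* 4KB-aligned page address */
        uint64_t pte  = page | R600_PTE_VALID |
                        R600_PTE_READABLE | R600_PTE_WRITEABLE;
#endif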

/**
 * cayman_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Update the page tables using the DMA (cayman/TN).
 */
void cayman_dma_vm_set_pages(struct radeon_device *rdev,
                             struct radeon_ib *ib,
                             uint64_t pe,
                             uint64_t addr, unsigned count,
                             uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                if (flags & R600_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;

                pe += ndw * 4;
                addr += (ndw / 2) * incr;
                count -= ndw / 2;
        }
}
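
/*
 * Usage sketch, not part of this file and the arguments are
 * hypothetical: the PTE_PDE packet lets the engine generate the entries
 * itself from a base value plus an increment, so mapping 16 contiguous
 * 4KB VRAM pages takes one 9-dword packet instead of 16 explicit
 * 2-dword PTE writes:
 */
#if 0   /* example only */
        cayman_dma_vm_set_pages(rdev, ib,
                                pe,             /* where the PTEs live */
                                vram_addr,      /* first page address */
                                16,             /* count */
                                0x1000,         /* incr: 4KB per entry */
                                R600_PTE_VALID | R600_PTE_READABLE |
                                R600_PTE_WRITEABLE);
#endif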

/**
 * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 */
void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
{
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
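
/*
 * Worked example, not part of this file: NOP-padding length_dw up to a
 * multiple of 8 keeps the IB consistent with the 8-dword alignment rule
 * enforced in cayman_dma_ring_ib_execute() above; an IB of 21 dwords
 * gets 3 NOPs appended to reach 24.  A closed form for the pad count:
 */
#if 0   /* example only */
        unsigned pad = (8 - (ib->length_dw & 7)) & 7;   /* 21 -> 3 */
#endif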

/**
 * cayman_dma_vm_flush - flush the VM TLB via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: vm instance to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base for the requested VM and
 * flush the VM TLB using the DMA ring (cayman/TN).
 */
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
                         unsigned vm_id, uint64_t pd_addr)
{
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
        radeon_ring_write(ring, pd_addr >> 12);

        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);

        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);

        /* wait for invalidate to complete */
        radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
        radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 0); /* mask */
        radeon_ring_write(ring, 0); /* value */
}
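
/*
 * Illustrative note, not part of this file and the field layout is an
 * assumption: in the SRBM_WRITE packets above, the second dword appears
 * to pack a byte-enable (0xf == all four bytes) in bits 19:16 together
 * with the register's dword address, hence the >> 2 on the MMIO byte
 * offsets.  The first packet writes the page-directory base in 4KB
 * units; with a hypothetical address:
 */
#if 0   /* example only */
        uint64_t pd_addr  = 0x40000000ULL;      /* 4KB-aligned directory */
        u32      pd_field = pd_addr >> 12;      /* 0x40000 written via SRBM */
#endif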