/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
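		/* engine is not hung: record the current ring position so
		 * later calls can tell whether the ring is making progress */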
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
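		/* the COPY packet byte count is a 20-bit field; clamp to the
		 * largest multiple of 8 so a PTE is never split across packets */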
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
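		/* each PTE takes two dwords and the WRITE packet count is a
		 * 20-bit dword field, so clamp to the largest even value */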
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
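		/* one PTE_PDE packet generates ndw / 2 entries; ndw is
		 * clamped to 0xFFFFE to fit the packet's count field */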
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

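/**
 * si_dma_vm_flush - flush the VM TLB using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for the requested VM context,
 * flush the HDP cache, and invalidate the TLB for that VM using
 * the DMA ring (SI).
 */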
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
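	/* write the updated page directory base for this VM context */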
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
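	/* each COPY packet takes 5 dwords; the extra 11 dwords leave room
	 * for the synchronization and fence packets emitted below */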
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
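		/* a single COPY packet can transfer at most 0xFFFFF bytes */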
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}