/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine. The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 */
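
/*
 * Note on packet encoding: every ring/IB dword sequence below starts
 * with a header built by the DMA_PACKET() macro. Per the definitions
 * in r600d.h, it packs the opcode into bits 31:28, the tiling flag (t)
 * into bit 23, the semaphore select (s) into bit 22, and the dword
 * count (n) into bits 15:0.
 */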

/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

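	/* the hardware reports rptr as a byte offset, whether it came from
	 * the register or from write-back memory; convert it to the dword
	 * index the ring code works in */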
	return (rptr & 0x3fffc) >> 2;
}

/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}

/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}

/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

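	/* while the DMA copy ring is down, buffer moves fall back to the
	 * CPU, which can only reach VRAM through the visible aperture */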
	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}

/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
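	/* the size field in DMA_RB_CNTL is the log2 of the ring size in
	 * dwords, stored just above the enable bit (bit 0, set later) */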
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

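	/* clear CTXEMPTY_INT_ENABLE so the engine does not raise
	 * "context empty" interrupts */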
	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}

/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

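	/* if the soft-reset logic does not flag the DMA block, note the
	 * progress so the lockup tracking is refreshed */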
	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

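	/* the whole test is one 4-dword WRITE packet: header, destination
	 * address lo/hi, and a single payload dword */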
	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}

/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

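	/* the 's' field in the packet header selects signal (1) vs. wait (0) */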
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}

/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

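	/* the same single-dword WRITE packet as the ring test, but
	 * submitted through an indirect buffer instead of the ring */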
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}

/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
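		/* predict the rptr value after this 4-dword write packet,
		 * the NOP padding, and the 3-dword IB packet below have all
		 * been consumed, and have the engine write it back */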
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring.  Since the packet itself is 3 dwords, pad with NOPs
	 * until the write pointer sits at 5 (mod 8).
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
				   uint64_t src_offset, uint64_t dst_offset,
				   unsigned num_gpu_pages,
				   struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

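	/* a single COPY packet moves at most 0xFFFE dwords and occupies 4
	 * dwords of ring space; the extra 8 dwords of headroom cover the
	 * sync and fence packets emitted below */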
	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}