/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

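/* Illustrative example (not from the original source): if buffers A, B
 * and C are added with priorities 2, 5 and 2, the concatenated output is
 * B, A, C -- highest priority first, with A still ahead of C because
 * insertion within a bucket preserves submission order (the sort is
 * stable).
 */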
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

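/**
 * radeon_cs_parser_relocs() - resolve, prioritize and validate the BO list
 * @p: parser structure holding parsing context.
 *
 * Looks up the GEM object behind each relocation entry (one 4-dword
 * &struct drm_radeon_cs_reloc per entry), assigns preferred/allowed
 * placement domains and a priority to every buffer, bucket-sorts them
 * into &p->validated and validates the resulting list.
 **/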
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
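		/* e.g. a write buffer with userspace priority 15 gets
		 * 15 * 2 + 1 = 31, the highest priority userspace can
		 * request.
		 */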

		/* The first reloc of an UVD job is the msg and that must be
		 * in VRAM; also put everything into VRAM on AGP cards and
		 * older IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || (p->rdev->flags & RADEON_IS_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
#ifdef notyet
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);
#endif

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

#ifdef notyet
	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);
#endif

	return r;
}

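/**
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 * @p: parser structure holding parsing context.
 * @ring: ring id from userspace (RADEON_CS_RING_*)
 * @priority: CS priority from the flags chunk
 *
 * Stores the priority and translates the ring id into the matching
 * hardware ring index for this ASIC family.
 **/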
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

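/**
 * radeon_cs_sync_rings() - sync the IB against every validated buffer
 * @p: parser structure holding parsing context.
 *
 * Walks &p->validated and makes the IB wait for the fences attached to
 * each buffer's reservation object, so that work from other rings
 * completes before this submission touches the buffers.
 **/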
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.shared);
		if (r)
			return r;
	}
	return 0;
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
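/**
 * radeon_cs_parser_init() - initialize the parser from a CS ioctl request
 * @p: parser structure holding parsing context.
 * @data: pointer to the &struct drm_radeon_cs ioctl argument
 *
 * The ioctl argument carries an array of user pointers, each referring
 * to one &struct drm_radeon_cs_chunk (chunk_id, length_dw, chunk_data).
 * Copy in that array, classify the chunks (IB, relocs, const IB, flags)
 * and pick the target ring and priority from the flags chunk.
 **/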
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

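/* list_sort() comparator used below to put smaller buffers first. */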
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 * @backoff: indicator to back off the reservation
 *
 * If error is set, back off the buffer reservations (when @backoff is
 * true); otherwise fence the validated buffers. In both cases the memory
 * used by the parsing context is freed.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_unreference_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	drm_free_large(parser->relocs);
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

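/**
 * radeon_cs_ib_chunk() - parse and schedule a non-VM IB
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Runs the per-ring command checker over the IB, syncs against the
 * validated buffers and schedules the IB. A no-op for VM submissions,
 * which are handled by radeon_cs_ib_vm_chunk().
 **/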
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

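/**
 * radeon_bo_vm_update_pte() - update the page tables for all CS buffers
 * @p: parser structure holding parsing context.
 * @vm: the VM the command stream executes in.
 *
 * Updates the page directory, the temporary ring BO mapping and the
 * page-table entries of every relocated buffer, and makes the IB wait
 * for the resulting page-table updates.
 **/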
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

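/**
 * radeon_cs_ib_vm_chunk() - parse and schedule a VM IB
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Runs the per-ring IB checker, updates the page tables under the VM
 * mutex, syncs the rings and schedules the IB (together with the const
 * IB on SI and later chips). A no-op for non-VM submissions.
 **/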
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

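/**
 * radeon_cs_handle_lockup() - translate a deadlock into a GPU reset
 * @rdev: radeon device structure.
 * @r: error code returned by the submission path
 *
 * On -EDEADLK the GPU is considered locked up; reset it and, if the
 * reset succeeds, return -EAGAIN so userspace retries the submission.
 **/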
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

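/**
 * radeon_cs_ib_fill() - allocate the IBs and copy in the command streams
 * @rdev: radeon device structure.
 * @parser: parser structure holding parsing context.
 *
 * Validates the IB (and const IB) chunk sizes against
 * RADEON_IB_VM_MAX_SIZE, allocates the indirect buffers and copies the
 * command dwords from userspace (or from the kernel copy, if one was
 * made during parser init).
 **/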
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

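/**
 * radeon_cs_ioctl() - the main command submission entry point
 * @dev: DRM device structure.
 * @data: pointer to the &struct drm_radeon_cs ioctl argument
 * @filp: DRM file the submission comes from.
 *
 * Initializes the parser, fills and validates the IBs, submits the work
 * to the hardware and cleans up, turning a detected lockup into a GPU
 * reset plus -EAGAIN.
 **/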
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index of the packet header in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p: structure holding the parser context.
 *
 * Check if the next packet is a relocation packet3 NOP.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p: structure holding the parser context.
 * @pkt: structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resolved relocation entry
 * @nomm: no memory management (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the relocation data.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}