Line data Source code
1 : /*
2 : * Copyright 2008 Advanced Micro Devices, Inc.
3 : * Copyright 2008 Red Hat Inc.
4 : * Copyright 2009 Jerome Glisse.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the "Software"),
8 : * to deal in the Software without restriction, including without limitation
9 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 : * and/or sell copies of the Software, and to permit persons to whom the
11 : * Software is furnished to do so, subject to the following conditions:
12 : *
13 : * The above copyright notice and this permission notice shall be included in
14 : * all copies or substantial portions of the Software.
15 : *
16 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 : * OTHER DEALINGS IN THE SOFTWARE.
23 : *
24 : * Authors: Dave Airlie
25 : * Alex Deucher
26 : * Jerome Glisse
27 : */
28 : #include <dev/pci/drm/drmP.h>
29 : #include <dev/pci/drm/radeon_drm.h>
30 : #include "radeon.h"
31 :
32 0 : void radeon_gem_object_free(struct drm_gem_object *gobj)
33 : {
34 0 : struct radeon_bo *robj = gem_to_radeon_bo(gobj);
35 :
36 0 : if (robj) {
37 : #ifdef notyet
38 : if (robj->gem_base.import_attach)
39 : drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
40 : #endif
41 0 : radeon_mn_unregister(robj);
42 0 : radeon_bo_unref(&robj);
43 0 : }
44 0 : }
45 :
46 0 : int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
47 : int alignment, int initial_domain,
48 : u32 flags, bool kernel,
49 : struct drm_gem_object **obj)
50 : {
51 0 : struct radeon_bo *robj;
52 : unsigned long max_size;
53 : int r;
54 :
55 0 : *obj = NULL;
56 : /* At least align on page size */
57 0 : if (alignment < PAGE_SIZE) {
58 : alignment = PAGE_SIZE;
59 0 : }
60 :
61 : /* Maximum bo size is the unpinned gtt size since we use the gtt to
62 : * handle vram to system pool migrations.
63 : */
64 0 : max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
65 0 : if (size > max_size) {
66 : DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
67 : size >> 20, max_size >> 20);
68 0 : return -ENOMEM;
69 : }
70 :
71 : retry:
72 0 : r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
73 : flags, NULL, NULL, &robj);
74 0 : if (r) {
75 0 : if (r != -ERESTARTSYS) {
76 0 : if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
77 0 : initial_domain |= RADEON_GEM_DOMAIN_GTT;
78 0 : goto retry;
79 : }
80 0 : DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
81 : size, initial_domain, alignment, r);
82 0 : }
83 0 : return r;
84 : }
85 0 : *obj = &robj->gem_base;
86 0 : robj->pid = curproc->p_p->ps_pid;
87 :
88 0 : mutex_lock(&rdev->gem.mutex);
89 0 : list_add_tail(&robj->list, &rdev->gem.objects);
90 0 : mutex_unlock(&rdev->gem.mutex);
91 :
92 0 : return 0;
93 0 : }
94 :
95 0 : static int radeon_gem_set_domain(struct drm_gem_object *gobj,
96 : uint32_t rdomain, uint32_t wdomain)
97 : {
98 : struct radeon_bo *robj;
99 : uint32_t domain;
100 : long r;
101 :
102 : /* FIXME: reimplement */
103 0 : robj = gem_to_radeon_bo(gobj);
104 : /* work out where to validate the buffer to */
105 : domain = wdomain;
106 0 : if (!domain) {
107 : domain = rdomain;
108 0 : }
109 0 : if (!domain) {
110 : /* Do nothing */
111 0 : printk(KERN_WARNING "Set domain without domain!\n");
112 0 : return 0;
113 : }
114 0 : if (domain == RADEON_GEM_DOMAIN_CPU) {
115 : /* Asking for CPU access; wait for the object to be idle */
116 0 : r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
117 0 : if (!r)
118 : r = -EBUSY;
119 :
120 0 : if (r < 0 && r != -EINTR) {
121 0 : printk(KERN_ERR "Failed to wait for object: %li\n", r);
122 0 : return r;
123 : }
124 : }
125 0 : return 0;
126 0 : }
127 :
128 0 : int radeon_gem_init(struct radeon_device *rdev)
129 : {
130 0 : INIT_LIST_HEAD(&rdev->gem.objects);
131 0 : return 0;
132 : }
133 :
134 0 : void radeon_gem_fini(struct radeon_device *rdev)
135 : {
136 0 : radeon_bo_force_delete(rdev);
137 0 : }
138 :
139 : /*
140 : * Called from drm_gem_handle_create, which appears in both the new and open
141 : * ioctl cases.
142 : */
143 0 : int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
144 : {
145 0 : struct radeon_bo *rbo = gem_to_radeon_bo(obj);
146 0 : struct radeon_device *rdev = rbo->rdev;
147 0 : struct radeon_fpriv *fpriv = file_priv->driver_priv;
148 0 : struct radeon_vm *vm = &fpriv->vm;
149 : struct radeon_bo_va *bo_va;
150 : int r;
151 :
152 0 : if ((rdev->family < CHIP_CAYMAN) ||
153 0 : (!rdev->accel_working)) {
154 0 : return 0;
155 : }
156 :
157 0 : r = radeon_bo_reserve(rbo, false);
158 0 : if (r) {
159 0 : return r;
160 : }
161 :
162 0 : bo_va = radeon_vm_bo_find(vm, rbo);
163 0 : if (!bo_va) {
164 0 : bo_va = radeon_vm_bo_add(rdev, vm, rbo);
165 0 : } else {
166 0 : ++bo_va->ref_count;
167 : }
168 0 : radeon_bo_unreserve(rbo);
169 :
170 0 : return 0;
171 0 : }
172 :
173 0 : void radeon_gem_object_close(struct drm_gem_object *obj,
174 : struct drm_file *file_priv)
175 : {
176 0 : struct radeon_bo *rbo = gem_to_radeon_bo(obj);
177 0 : struct radeon_device *rdev = rbo->rdev;
178 0 : struct radeon_fpriv *fpriv = file_priv->driver_priv;
179 0 : struct radeon_vm *vm = &fpriv->vm;
180 : struct radeon_bo_va *bo_va;
181 : int r;
182 :
183 0 : if ((rdev->family < CHIP_CAYMAN) ||
184 0 : (!rdev->accel_working)) {
185 0 : return;
186 : }
187 :
188 0 : r = radeon_bo_reserve(rbo, true);
189 0 : if (r) {
190 0 : dev_err(rdev->dev, "leaking bo va because "
191 : "we failed to reserve the bo (%d)\n", r);
192 0 : return;
193 : }
194 0 : bo_va = radeon_vm_bo_find(vm, rbo);
195 0 : if (bo_va) {
196 0 : if (--bo_va->ref_count == 0) {
197 0 : radeon_vm_bo_rmv(rdev, bo_va);
198 0 : }
199 : }
200 0 : radeon_bo_unreserve(rbo);
201 0 : }
202 :
203 0 : static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
204 : {
205 0 : if (r == -EDEADLK) {
206 0 : r = radeon_gpu_reset(rdev);
207 0 : if (!r)
208 : r = -EAGAIN;
209 0 : }
210 0 : return r;
211 : }
212 :
213 : /*
214 : * GEM ioctls.
215 : */
216 0 : int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
217 : struct drm_file *filp)
218 : {
219 0 : struct radeon_device *rdev = dev->dev_private;
220 0 : struct drm_radeon_gem_info *args = data;
221 : struct ttm_mem_type_manager *man;
222 :
223 0 : man = &rdev->mman.bdev.man[TTM_PL_VRAM];
224 :
225 0 : args->vram_size = rdev->mc.real_vram_size;
226 0 : args->vram_visible = (u64)man->size << PAGE_SHIFT;
227 0 : args->vram_visible -= rdev->vram_pin_size;
228 0 : args->gart_size = rdev->mc.gtt_size;
229 0 : args->gart_size -= rdev->gart_pin_size;
230 :
231 0 : return 0;
232 : }
233 :
234 0 : int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
235 : struct drm_file *filp)
236 : {
237 : /* TODO: implement */
238 0 : DRM_ERROR("unimplemented %s\n", __func__);
239 0 : return -ENOSYS;
240 : }
241 :
242 0 : int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
243 : struct drm_file *filp)
244 : {
245 : /* TODO: implement */
246 0 : DRM_ERROR("unimplemented %s\n", __func__);
247 0 : return -ENOSYS;
248 : }
249 :
250 0 : int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
251 : struct drm_file *filp)
252 : {
253 0 : struct radeon_device *rdev = dev->dev_private;
254 0 : struct drm_radeon_gem_create *args = data;
255 0 : struct drm_gem_object *gobj;
256 0 : uint32_t handle;
257 : int r;
258 :
259 0 : down_read(&rdev->exclusive_lock);
260 : /* create a gem object to contain this object in */
261 0 : args->size = roundup(args->size, PAGE_SIZE);
262 0 : r = radeon_gem_object_create(rdev, args->size, args->alignment,
263 0 : args->initial_domain, args->flags,
264 : false, &gobj);
265 0 : if (r) {
266 0 : up_read(&rdev->exclusive_lock);
267 0 : r = radeon_gem_handle_lockup(rdev, r);
268 0 : return r;
269 : }
270 0 : r = drm_gem_handle_create(filp, gobj, &handle);
271 : /* drop reference from allocate - handle holds it now */
272 0 : drm_gem_object_unreference_unlocked(gobj);
273 0 : if (r) {
274 0 : up_read(&rdev->exclusive_lock);
275 0 : r = radeon_gem_handle_lockup(rdev, r);
276 0 : return r;
277 : }
278 0 : args->handle = handle;
279 0 : up_read(&rdev->exclusive_lock);
280 0 : return 0;
281 0 : }
282 :
283 0 : int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
284 : struct drm_file *filp)
285 : {
286 0 : return -ENOSYS;
287 : #ifdef notyet
288 : struct radeon_device *rdev = dev->dev_private;
289 : struct drm_radeon_gem_userptr *args = data;
290 : struct drm_gem_object *gobj;
291 : struct radeon_bo *bo;
292 : uint32_t handle;
293 : int r;
294 :
295 : if (offset_in_page(args->addr | args->size))
296 : return -EINVAL;
297 :
298 : /* reject unknown flag values */
299 : if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
300 : RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
301 : RADEON_GEM_USERPTR_REGISTER))
302 : return -EINVAL;
303 :
304 : if (args->flags & RADEON_GEM_USERPTR_READONLY) {
305 : /* readonly pages not tested on older hardware */
306 : if (rdev->family < CHIP_R600)
307 : return -EINVAL;
308 :
309 : } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
310 : !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
311 :
312 : /* if we want to write to it we must require anonymous
313 : memory and install an MMU notifier */
314 : return -EACCES;
315 : }
316 :
317 : down_read(&rdev->exclusive_lock);
318 :
319 : /* create a gem object to contain this object in */
320 : r = radeon_gem_object_create(rdev, args->size, 0,
321 : RADEON_GEM_DOMAIN_CPU, 0,
322 : false, &gobj);
323 : if (r)
324 : goto handle_lockup;
325 :
326 : bo = gem_to_radeon_bo(gobj);
327 : r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
328 : if (r)
329 : goto release_object;
330 :
331 : if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
332 : r = radeon_mn_register(bo, args->addr);
333 : if (r)
334 : goto release_object;
335 : }
336 :
337 : if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
338 : down_read(¤t->mm->mmap_sem);
339 : r = radeon_bo_reserve(bo, true);
340 : if (r) {
341 : up_read(¤t->mm->mmap_sem);
342 : goto release_object;
343 : }
344 :
345 : radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
346 : r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
347 : radeon_bo_unreserve(bo);
348 : up_read(¤t->mm->mmap_sem);
349 : if (r)
350 : goto release_object;
351 : }
352 :
353 : r = drm_gem_handle_create(filp, gobj, &handle);
354 : /* drop reference from allocate - handle holds it now */
355 : drm_gem_object_unreference_unlocked(gobj);
356 : if (r)
357 : goto handle_lockup;
358 :
359 : args->handle = handle;
360 : up_read(&rdev->exclusive_lock);
361 : return 0;
362 :
363 : release_object:
364 : drm_gem_object_unreference_unlocked(gobj);
365 :
366 : handle_lockup:
367 : up_read(&rdev->exclusive_lock);
368 : r = radeon_gem_handle_lockup(rdev, r);
369 :
370 : return r;
371 : #endif
372 : }
373 :
374 0 : int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
375 : struct drm_file *filp)
376 : {
377 : /* transition the BO to a domain -
378 : * just validate the BO into a certain domain */
379 0 : struct radeon_device *rdev = dev->dev_private;
380 0 : struct drm_radeon_gem_set_domain *args = data;
381 : struct drm_gem_object *gobj;
382 : struct radeon_bo *robj;
383 : int r;
384 :
385 : /* for now if someone requests domain CPU -
386 : * just make sure the buffer is finished with */
387 0 : down_read(&rdev->exclusive_lock);
388 :
389 : /* just do a BO wait for now */
390 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
391 0 : if (gobj == NULL) {
392 0 : up_read(&rdev->exclusive_lock);
393 0 : return -ENOENT;
394 : }
395 0 : robj = gem_to_radeon_bo(gobj);
396 :
397 0 : r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
398 :
399 0 : drm_gem_object_unreference_unlocked(gobj);
400 0 : up_read(&rdev->exclusive_lock);
401 0 : r = radeon_gem_handle_lockup(robj->rdev, r);
402 0 : return r;
403 0 : }
404 :
405 0 : int radeon_mode_dumb_mmap(struct drm_file *filp,
406 : struct drm_device *dev,
407 : uint32_t handle, uint64_t *offset_p)
408 : {
409 : struct drm_gem_object *gobj;
410 : struct radeon_bo *robj;
411 :
412 0 : gobj = drm_gem_object_lookup(dev, filp, handle);
413 0 : if (gobj == NULL) {
414 0 : return -ENOENT;
415 : }
416 0 : robj = gem_to_radeon_bo(gobj);
417 0 : if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
418 0 : drm_gem_object_unreference_unlocked(gobj);
419 0 : return -EPERM;
420 : }
421 0 : *offset_p = radeon_bo_mmap_offset(robj);
422 0 : drm_gem_object_unreference_unlocked(gobj);
423 0 : return 0;
424 0 : }
425 :
426 0 : int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
427 : struct drm_file *filp)
428 : {
429 0 : struct drm_radeon_gem_mmap *args = data;
430 :
431 0 : return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
432 : }
433 :
434 0 : int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
435 : struct drm_file *filp)
436 : {
437 0 : struct drm_radeon_gem_busy *args = data;
438 : struct drm_gem_object *gobj;
439 : struct radeon_bo *robj;
440 : int r;
441 : uint32_t cur_placement = 0;
442 :
443 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
444 0 : if (gobj == NULL) {
445 0 : return -ENOENT;
446 : }
447 0 : robj = gem_to_radeon_bo(gobj);
448 :
449 0 : r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
450 0 : if (r == 0)
451 0 : r = -EBUSY;
452 : else
453 : r = 0;
454 :
455 0 : cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
456 0 : args->domain = radeon_mem_type_to_domain(cur_placement);
457 0 : drm_gem_object_unreference_unlocked(gobj);
458 0 : return r;
459 0 : }
460 :
461 0 : int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
462 : struct drm_file *filp)
463 : {
464 0 : struct radeon_device *rdev = dev->dev_private;
465 0 : struct drm_radeon_gem_wait_idle *args = data;
466 : struct drm_gem_object *gobj;
467 : struct radeon_bo *robj;
468 : int r = 0;
469 : uint32_t cur_placement = 0;
470 : long ret;
471 :
472 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
473 0 : if (gobj == NULL) {
474 0 : return -ENOENT;
475 : }
476 0 : robj = gem_to_radeon_bo(gobj);
477 :
478 0 : ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
479 0 : if (ret == 0)
480 0 : r = -EBUSY;
481 0 : else if (ret < 0)
482 0 : r = ret;
483 :
484 : /* Flush HDP cache via MMIO if necessary */
485 0 : cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
486 0 : if (rdev->asic->mmio_hdp_flush &&
487 0 : radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
488 0 : robj->rdev->asic->mmio_hdp_flush(rdev);
489 0 : drm_gem_object_unreference_unlocked(gobj);
490 0 : r = radeon_gem_handle_lockup(rdev, r);
491 0 : return r;
492 0 : }
493 :
494 0 : int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
495 : struct drm_file *filp)
496 : {
497 0 : struct drm_radeon_gem_set_tiling *args = data;
498 : struct drm_gem_object *gobj;
499 : struct radeon_bo *robj;
500 : int r = 0;
501 :
502 : DRM_DEBUG("%d \n", args->handle);
503 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
504 0 : if (gobj == NULL)
505 0 : return -ENOENT;
506 0 : robj = gem_to_radeon_bo(gobj);
507 0 : r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
508 0 : drm_gem_object_unreference_unlocked(gobj);
509 0 : return r;
510 0 : }
511 :
512 0 : int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
513 : struct drm_file *filp)
514 : {
515 0 : struct drm_radeon_gem_get_tiling *args = data;
516 : struct drm_gem_object *gobj;
517 : struct radeon_bo *rbo;
518 : int r = 0;
519 :
520 : DRM_DEBUG("\n");
521 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
522 0 : if (gobj == NULL)
523 0 : return -ENOENT;
524 0 : rbo = gem_to_radeon_bo(gobj);
525 0 : r = radeon_bo_reserve(rbo, false);
526 0 : if (unlikely(r != 0))
527 : goto out;
528 0 : radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
529 0 : radeon_bo_unreserve(rbo);
530 : out:
531 0 : drm_gem_object_unreference_unlocked(gobj);
532 0 : return r;
533 0 : }
534 :
535 : /**
536 : * radeon_gem_va_update_vm - update the bo_va in its VM
537 : *
538 : * @rdev: radeon_device pointer
539 : * @bo_va: bo_va to update
540 : *
541 : * Update the bo_va directly after setting its address. Errors are not
542 : * vital here, so they are not reported back to userspace.
543 : */
544 0 : static void radeon_gem_va_update_vm(struct radeon_device *rdev,
545 : struct radeon_bo_va *bo_va)
546 : {
547 0 : struct ttm_validate_buffer tv, *entry;
548 : struct radeon_bo_list *vm_bos;
549 0 : struct ww_acquire_ctx ticket;
550 0 : struct list_head list;
551 : unsigned domain;
552 : int r;
553 :
554 0 : INIT_LIST_HEAD(&list);
555 :
556 0 : tv.bo = &bo_va->bo->tbo;
557 0 : tv.shared = true;
558 0 : list_add(&tv.head, &list);
559 :
560 0 : vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
561 0 : if (!vm_bos)
562 0 : return;
563 :
564 0 : r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
565 0 : if (r)
566 : goto error_free;
567 :
568 0 : list_for_each_entry(entry, &list, head) {
569 0 : domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
570 : /* if anything is swapped out don't swap it in here,
571 : just abort and wait for the next CS */
572 0 : if (domain == RADEON_GEM_DOMAIN_CPU)
573 : goto error_unreserve;
574 : }
575 :
576 0 : mutex_lock(&bo_va->vm->mutex);
577 0 : r = radeon_vm_clear_freed(rdev, bo_va->vm);
578 0 : if (r)
579 : goto error_unlock;
580 :
581 0 : if (bo_va->it.start)
582 0 : r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
583 :
584 : error_unlock:
585 0 : mutex_unlock(&bo_va->vm->mutex);
586 :
587 : error_unreserve:
588 0 : ttm_eu_backoff_reservation(&ticket, &list);
589 :
590 : error_free:
591 0 : drm_free_large(vm_bos);
592 :
593 0 : if (r && r != -ERESTARTSYS)
594 0 : DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
595 0 : }
596 :
597 0 : int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
598 : struct drm_file *filp)
599 : {
600 0 : struct drm_radeon_gem_va *args = data;
601 : struct drm_gem_object *gobj;
602 0 : struct radeon_device *rdev = dev->dev_private;
603 0 : struct radeon_fpriv *fpriv = filp->driver_priv;
604 : struct radeon_bo *rbo;
605 : struct radeon_bo_va *bo_va;
606 : u32 invalid_flags;
607 : int r = 0;
608 :
609 0 : if (!rdev->vm_manager.enabled) {
610 0 : args->operation = RADEON_VA_RESULT_ERROR;
611 0 : return -ENOTTY;
612 : }
613 :
614 : /* !! DONT REMOVE !!
615 : * We don't support vm_id yet; to be sure we don't have broken
616 : * userspace, reject anyone trying to use a non-zero value. Thus, moving
617 : * forward, we can use those fields without breaking existing userspace.
618 : */
619 0 : if (args->vm_id) {
620 0 : args->operation = RADEON_VA_RESULT_ERROR;
621 0 : return -EINVAL;
622 : }
623 :
624 0 : if (args->offset < RADEON_VA_RESERVED_SIZE) {
625 0 : dev_err(&dev->pdev->dev,
626 : "offset 0x%lX is in reserved area 0x%X\n",
627 : (unsigned long)args->offset,
628 : RADEON_VA_RESERVED_SIZE);
629 0 : args->operation = RADEON_VA_RESULT_ERROR;
630 0 : return -EINVAL;
631 : }
632 :
633 : /* don't remove, we need to force userspace to set the snooped flag;
634 : * otherwise we will end up with broken userspace and we won't be able
635 : * to enable this feature without adding a new interface
636 : */
637 : invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
638 0 : if ((args->flags & invalid_flags)) {
639 0 : dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
640 : args->flags, invalid_flags);
641 0 : args->operation = RADEON_VA_RESULT_ERROR;
642 0 : return -EINVAL;
643 : }
644 :
645 0 : switch (args->operation) {
646 : case RADEON_VA_MAP:
647 : case RADEON_VA_UNMAP:
648 : break;
649 : default:
650 0 : dev_err(&dev->pdev->dev, "unsupported operation %d\n",
651 : args->operation);
652 0 : args->operation = RADEON_VA_RESULT_ERROR;
653 0 : return -EINVAL;
654 : }
655 :
656 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
657 0 : if (gobj == NULL) {
658 0 : args->operation = RADEON_VA_RESULT_ERROR;
659 0 : return -ENOENT;
660 : }
661 0 : rbo = gem_to_radeon_bo(gobj);
662 0 : r = radeon_bo_reserve(rbo, false);
663 0 : if (r) {
664 0 : args->operation = RADEON_VA_RESULT_ERROR;
665 0 : drm_gem_object_unreference_unlocked(gobj);
666 0 : return r;
667 : }
668 0 : bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
669 0 : if (!bo_va) {
670 0 : args->operation = RADEON_VA_RESULT_ERROR;
671 0 : drm_gem_object_unreference_unlocked(gobj);
672 0 : return -ENOENT;
673 : }
674 :
675 0 : switch (args->operation) {
676 : case RADEON_VA_MAP:
677 0 : if (bo_va->it.start) {
678 0 : args->operation = RADEON_VA_RESULT_VA_EXIST;
679 0 : args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
680 0 : radeon_bo_unreserve(rbo);
681 0 : goto out;
682 : }
683 0 : r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
684 0 : break;
685 : case RADEON_VA_UNMAP:
686 0 : r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
687 0 : break;
688 : default:
689 : break;
690 : }
691 0 : if (!r)
692 0 : radeon_gem_va_update_vm(rdev, bo_va);
693 0 : args->operation = RADEON_VA_RESULT_OK;
694 0 : if (r) {
695 0 : args->operation = RADEON_VA_RESULT_ERROR;
696 0 : }
697 : out:
698 0 : drm_gem_object_unreference_unlocked(gobj);
699 0 : return r;
700 0 : }
701 :
702 0 : int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
703 : struct drm_file *filp)
704 : {
705 0 : struct drm_radeon_gem_op *args = data;
706 : struct drm_gem_object *gobj;
707 : struct radeon_bo *robj;
708 : int r;
709 :
710 0 : gobj = drm_gem_object_lookup(dev, filp, args->handle);
711 0 : if (gobj == NULL) {
712 0 : return -ENOENT;
713 : }
714 0 : robj = gem_to_radeon_bo(gobj);
715 :
716 : r = -EPERM;
717 0 : if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
718 : goto out;
719 :
720 0 : r = radeon_bo_reserve(robj, false);
721 0 : if (unlikely(r))
722 : goto out;
723 :
724 0 : switch (args->op) {
725 : case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
726 0 : args->value = robj->initial_domain;
727 0 : break;
728 : case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
729 0 : robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
730 : RADEON_GEM_DOMAIN_GTT |
731 : RADEON_GEM_DOMAIN_CPU);
732 0 : break;
733 : default:
734 : r = -EINVAL;
735 0 : }
736 :
737 0 : radeon_bo_unreserve(robj);
738 : out:
739 0 : drm_gem_object_unreference_unlocked(gobj);
740 0 : return r;
741 0 : }
742 :
743 0 : int radeon_mode_dumb_create(struct drm_file *file_priv,
744 : struct drm_device *dev,
745 : struct drm_mode_create_dumb *args)
746 : {
747 0 : struct radeon_device *rdev = dev->dev_private;
748 0 : struct drm_gem_object *gobj;
749 0 : uint32_t handle;
750 : int r;
751 :
752 0 : args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
753 0 : args->size = args->pitch * args->height;
754 0 : args->size = roundup2(args->size, PAGE_SIZE);
755 :
756 0 : r = radeon_gem_object_create(rdev, args->size, 0,
757 : RADEON_GEM_DOMAIN_VRAM, 0,
758 : false, &gobj);
759 0 : if (r)
760 0 : return -ENOMEM;
761 :
762 0 : r = drm_gem_handle_create(file_priv, gobj, &handle);
763 : /* drop reference from allocate - handle holds it now */
764 0 : drm_gem_object_unreference_unlocked(gobj);
765 0 : if (r) {
766 0 : return r;
767 : }
768 0 : args->handle = handle;
769 0 : return 0;
770 0 : }
771 :
772 : #if defined(CONFIG_DEBUG_FS)
773 : static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
774 : {
775 : struct drm_info_node *node = (struct drm_info_node *)m->private;
776 : struct drm_device *dev = node->minor->dev;
777 : struct radeon_device *rdev = dev->dev_private;
778 : struct radeon_bo *rbo;
779 : unsigned i = 0;
780 :
781 : mutex_lock(&rdev->gem.mutex);
782 : list_for_each_entry(rbo, &rdev->gem.objects, list) {
783 : unsigned domain;
784 : const char *placement;
785 :
786 : domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
787 : switch (domain) {
788 : case RADEON_GEM_DOMAIN_VRAM:
789 : placement = "VRAM";
790 : break;
791 : case RADEON_GEM_DOMAIN_GTT:
792 : placement = " GTT";
793 : break;
794 : case RADEON_GEM_DOMAIN_CPU:
795 : default:
796 : placement = " CPU";
797 : break;
798 : }
799 : seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
800 : i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
801 : placement, (unsigned long)rbo->pid);
802 : i++;
803 : }
804 : mutex_unlock(&rdev->gem.mutex);
805 : return 0;
806 : }
807 :
808 : static struct drm_info_list radeon_debugfs_gem_list[] = {
809 : {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
810 : };
811 : #endif
812 :
813 0 : int radeon_gem_debugfs_init(struct radeon_device *rdev)
814 : {
815 : #if defined(CONFIG_DEBUG_FS)
816 : return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
817 : #endif
818 0 : return 0;
819 : }