/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/ttm/ttm_bo_api.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

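/*
 * Recover the radeon_device from a ttm_bo_device pointer: the bdev is
 * embedded in the radeon_mman, which is itself embedded in the
 * radeon_device, so two container_of() steps walk back to the owner.
 */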
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

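/*
 * Take references on the device-independent TTM globals: first the
 * memory accounting object, then the BO global that depends on it.
 * On failure the reference already taken is dropped again.
 */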
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

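/*
 * Describe each placement pool to TTM: SYSTEM is plain cached system
 * memory, TT is the GART aperture (or the AGP aperture when AGP is
 * active), and VRAM is fixed, write-combined on-card memory.
 */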
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

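/*
 * Pick where a BO should go when evicted.  VRAM BOs normally move to
 * GTT; when only part of VRAM is CPU-visible, the CPU-inaccessible
 * tail of VRAM is tried first with GTT as the busy fallback.  BOs in
 * any other placement fall back to CPU-domain system memory.
 */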
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

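/*
 * Move a BO with the GPU copy engine: translate both placements into
 * GPU addresses, emit radeon_copy() on the copy ring and hand the
 * resulting fence to ttm_bo_move_accel_cleanup() to finish the move.
 */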
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

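/*
 * VRAM -> system moves are done in two steps: blit into a temporary
 * GTT placement, where the GPU can reach the pages, then let
 * ttm_bo_move_ttm() finish the GTT -> system half.
 */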
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

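/*
 * System -> VRAM moves mirror radeon_move_vram_ram(): first bind into
 * a temporary GTT placement with ttm_bo_move_ttm(), then blit from
 * GTT into the final VRAM placement.
 */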
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

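/*
 * Top-level move callback.  Moves that need no copy are handled by
 * radeon_move_null(), GPU blits are preferred for everything else,
 * and ttm_bo_move_memcpy() is the fallback when no copy ring is
 * available or the blit path fails.
 */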
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

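/*
 * Fill in the bus address information TTM needs to map a placement
 * for CPU access: nothing for system memory, the AGP aperture for TT
 * when AGP is active, and the PCI aperture for CPU-visible VRAM.
 */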
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;

	bus_dmamap_t			map;
	bus_dma_segment_t		*segs;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct vm_page **pages = ttm->pages + pinned;

		r = get_user_pages(current, current->mm, userptr, num_pages,
				   write, 0, pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned, 0);
	return r;
#endif
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	STUB();
#ifdef notyet
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct vm_page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	sg_free_table(ttm->sg);
#endif
}

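/*
 * Bind the tt pages into the GART at the offset chosen by TTM.  Write
 * access is dropped for userptr pages, and snooped (cache-coherent)
 * GART entries are used when the pages are CPU-cached.
 */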
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	bus_dmamap_destroy(gtt->rdev->dmat, gtt->map);
	free(gtt->segs, M_DRM, 0);
	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

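/*
 * Allocate the per-BO tt.  AGP devices use the generic AGP backend;
 * otherwise a radeon_ttm_tt is set up along with the bus_dma segment
 * array and DMA map that radeon_ttm_tt_populate() will load.
 */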
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct vm_page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}

	gtt->segs = mallocarray(gtt->ttm.ttm.num_pages,
	    sizeof(bus_dma_segment_t), M_DRM, M_WAITOK | M_ZERO);

	if (bus_dmamap_create(rdev->dmat, size, gtt->ttm.ttm.num_pages, size,
	    0, BUS_DMA_WAITOK, &gtt->map)) {
		free(gtt->segs, M_DRM, 0);
		ttm_dma_tt_fini(&gtt->ttm);
		free(gtt, M_DRM, 0);
		return NULL;
	}

	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

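/*
 * Allocate backing pages and fill the tt's DMA address array.
 * userptr and dma-buf (SG) ttms get their pages elsewhere; normal
 * ttms take pages from the TTM pool, load them into the bus_dma map
 * and record one DMA address per page for the GART bind above.
 */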
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	unsigned i;
	int r;
	int seg;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
#ifdef notyet
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
#endif
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

#ifdef __linux__
	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
#else
	for (i = 0; i < ttm->num_pages; i++) {
		gtt->segs[i].ds_addr = VM_PAGE_TO_PHYS(ttm->pages[i]);
		gtt->segs[i].ds_len = PAGE_SIZE;
	}

	if (bus_dmamap_load_raw(rdev->dmat, gtt->map, gtt->segs,
				ttm->num_pages,
				ttm->num_pages * PAGE_SIZE, 0)) {
		ttm_pool_unpopulate(ttm);
		return -EFAULT;
	}
#endif

	for (seg = 0, i = 0; seg < gtt->map->dm_nsegs; seg++) {
		bus_addr_t addr = gtt->map->dm_segs[seg].ds_addr;
		bus_size_t len = gtt->map->dm_segs[seg].ds_len;

		while (len > 0) {
			gtt->ttm.dma_address[i++] = addr;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
	}

	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

#ifdef __linux__
	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}
#else
	bus_dmamap_unload(rdev->dmat, gtt->map);
	for (i = 0; i < ttm->num_pages; i++)
		gtt->ttm.dma_address[i] = 0;
#endif

	ttm_pool_unpopulate(ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	STUB();
	return -ENOSYS;
#ifdef notyet
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
#endif
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

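/*
 * Bring up TTM for this device: take the global references, initialize
 * the BO device and the VRAM and GTT heaps, and pin a small buffer at
 * the start of VRAM (presumably covering the VGA "stolen" memory, per
 * the stollen_vga_memory name) before registering the debugfs files.
 */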
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of address space, so set it to 0 */
#ifdef notyet
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
#else
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver,
			       /*rdev->ddev->anon_inode->i_mapping*/ NULL,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
#endif
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.bdev.iot = rdev->iot;
	rdev->mman.bdev.memt = rdev->memt;
	rdev->mman.bdev.dmat = rdev->dmat;
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

#ifdef __sparc64__
	r = radeon_bo_create(rdev, rdev->fb_offset, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stollen_vga_memory);
#else
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stollen_vga_memory);
#endif
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

#ifdef __linux__
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return -EINVAL;
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
#else

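/*
 * OpenBSD counterpart of the Linux mmap path above: wrap TTM's uvm
 * fault handler so every fault runs with mclk_lock held for reading,
 * patching the pgops of the object returned by ttm_bo_mmap() the
 * first time through.
 */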
static struct uvm_pagerops radeon_ttm_vm_ops;
static const struct uvm_pagerops *ttm_vm_ops = NULL;

static int
radeon_ttm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)ufi->entry->object.uvm_obj;
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->pgo_fault(ufi, vaddr, pps, npages, centeridx,
	    fault_type, access_type, flags);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

struct uvm_object *
radeon_mmap(struct drm_device *dev, voff_t off, vsize_t size)
{
	struct radeon_device *rdev = dev->dev_private;
	struct uvm_object *uobj;

	if (unlikely(off < DRM_FILE_PAGE_OFFSET))
		return NULL;

#if 0
	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
#endif
	uobj = ttm_bo_mmap(off, size, &rdev->mman.bdev);
	if (unlikely(uobj == NULL)) {
		return NULL;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = uobj->pgops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.pgo_fault = &radeon_ttm_fault;
	}
	uobj->pgops = &radeon_ttm_vm_ops;
	return uobj;
}
#endif

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct vm_page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}
|