/* $OpenBSD: drm_gem.c,v 1.6 2018/06/25 22:29:16 kettenis Exp $ */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_vma_manager.h>
#include "drm_internal.h"

#include <uvm/uvm.h>

void	drm_unref(struct uvm_object *);
void	drm_ref(struct uvm_object *);
boolean_t	drm_flush(struct uvm_object *, voff_t, voff_t, int);
int	drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	    vm_fault_t, vm_prot_t, int);

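/*
 * UVM pager operations used for mappings of GEM objects.  UVM calls these
 * hooks to add or drop references, flush pages and service faults on a
 * mapped object; they forward to the GEM reference counting and the
 * driver's fault handler below.
 */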
struct uvm_pagerops drm_pgops = {
	NULL,
	drm_ref,
	drm_unref,
	drm_fault,
	drm_flush,
};

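/*
 * UVM reference hooks: translate a reference or release on the embedded
 * uvm_object into a reference or release on the containing GEM object.
 */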
void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_reference(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_unreference_unlocked(obj);
}

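/*
 * UVM fault handler for GEM mappings.  Copy-on-write mappings of device
 * objects are refused, the fault is serialized against device quiesce
 * (suspend/resume), and the actual work of entering the pages is delegated
 * to the driver's gem_fault() hook.
 */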
int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl.  So we must be careful
	 * not to deadlock.  Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", 0);
		}
		mtx_leave(&dev->quiesce_mtx);
		return (VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

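/*
 * Pager flush hook.  There is nothing for the generic GEM layer to do
 * here, so simply report success.
 */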
boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

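/*
 * Called when a DRM character device is mmap()ed: translate the fake file
 * offset handed out by the vma offset manager back into a GEM object,
 * check that the caller's file handle is allowed to map it, take a
 * reference and return the embedded uvm_object to UVM.
 */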
struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	if (dev->driver->mmap)
		return dev->driver->mmap(dev, off, size);

	mutex_lock(&dev->struct_mutex);

	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	filp = priv->filp;

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
	    off >> PAGE_SHIFT,
	    atop(round_page(size)));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	drm_gem_object_reference(obj);

	mutex_unlock(&dev->struct_mutex);
	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
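
/*
 * Illustrative userspace sketch (not compiled here) of the handle ioctls
 * implemented later in this file: a process exports a handle as a global
 * "flink" name, another process opens it again, then closes its handle.
 * Error handling is omitted.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);   // flink.name is now global
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);      // returns op.handle, op.size
 *
 *	struct drm_gem_close cl = { .handle = op.handle };
 *	ioctl(fd2, DRM_IOCTL_GEM_CLOSE, &cl);
 */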
203 :
204 : /*
205 : * We make up offsets for buffer objects so we can recognize them at
206 : * mmap time.
207 : */
208 :
209 : /* pgoff in mmap is an unsigned long, so we need to make sure that
210 : * the faked up offset will fit
211 : */
212 :
213 : #if BITS_PER_LONG == 64
214 : #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
215 : #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
216 : #else
217 : #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
218 : #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
219 : #endif
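
/*
 * For example, assuming 4 KB pages (PAGE_SHIFT == 12) on a 64-bit kernel,
 * fake offsets start at page 0x100000, i.e. the 4 GB mark, safely above
 * any real file offset, and the managed range spans 0xFFFFF * 16 pages
 * (roughly 64 GB of offset space for buffer objects).
 */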

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
	    DRM_FILE_PAGE_OFFSET_START,
	    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

#ifdef __linux__

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

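/*
 * OpenBSD variant: back the object with a UVM anonymous object (uao)
 * instead of a shmfs file, and account the allocation against the
 * device's object counters.
 */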
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	obj->uao = uao_create(size, 0);
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);

	obj->filp = (void *)obj->uao;

	return 0;
}

#endif

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store.  Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
		    obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning.  Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, *handlep);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}

/**
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
	    size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
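
/*
 * Typical driver usage (illustrative sketch, not part of this file): a
 * driver's "get mmap offset" ioctl allocates the fake offset once and
 * reports it to userspace, which then passes it as the offset argument
 * to mmap(2) on the DRM device:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */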

#ifdef __linux__

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
		    (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
    u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
	    &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

#ifdef __linux__

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

#else

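/*
 * OpenBSD variant: drop the backing UVM anonymous object and undo the
 * accounting done in drm_gem_object_init().
 */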
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	WARN_ON(obj->dma_buf);

	if (obj->uao)
		uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
}

#endif

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifdef __linux__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver.  Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset.  To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects.  So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES.  Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
	    vma->vm_pgoff,
	    vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif