Line data Source code
1 : /*
2 : * Copyright © 2012 Red Hat
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice (including the next
12 : * paragraph) shall be included in all copies or substantial portions of the
13 : * Software.
14 : *
15 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 : * IN THE SOFTWARE.
22 : *
23 : * Authors:
24 : * Dave Airlie <airlied@redhat.com>
25 : * Rob Clark <rob.clark@linaro.org>
26 : *
27 : */
28 :
29 : #ifdef __linux__
30 : #include <linux/export.h>
31 : #include <linux/dma-buf.h>
32 : #endif
33 : #include <dev/pci/drm/drmP.h>
34 : #ifdef notyet
35 : #include <drm/drm_gem.h>
36 : #endif
37 :
38 : #include "drm_internal.h"
39 :
40 : /*
41 : * DMA-BUF/GEM Object references and lifetime overview:
42 : *
43 : * On the export the dma_buf holds a reference to the exporting GEM
44 : * object. It takes this reference in handle_to_fd_ioctl, when it
45 : * first calls .prime_export and stores the exporting GEM object in
46 : * the dma_buf priv. This reference is released when the dma_buf
47 : * object goes away in the driver .release function.
48 : *
49 : * On the import the importing GEM object holds a reference to the
50 : * dma_buf (which in turn holds a ref to the exporting GEM object).
51 : * It takes that reference in the fd_to_handle ioctl.
52 : * It calls dma_buf_get, creates an attachment to it and stores the
53 : * attachment in the GEM object. When this attachment is destroyed
54 : * when the imported object is destroyed, we remove the attachment
55 : * and drop the reference to the dma_buf.
56 : *
57 : * Thus the chain of references always flows in one direction
58 : * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
59 : *
60 : * Self-importing: if userspace is using PRIME as a replacement for flink
61 : * then it will get a fd->handle request for a GEM object that it created.
62 : * Drivers should detect this situation and return back the gem object
63 : * from the dma-buf private. Prime will do this automatically for drivers that
64 : * use the drm_gem_prime_{import,export} helpers.
65 : */
66 :
/*
 * Per-file cache entry pairing an exported/imported dma-buf with the
 * GEM handle it maps to; linked into drm_prime_file_private::head and
 * protected by drm_prime_file_private::lock.
 */
struct drm_prime_member {
	struct list_head entry;		/* link in prime_fpriv->head */
	struct dma_buf *dma_buf;	/* dma-buf this entry holds a ref on */
	uint32_t handle;		/* GEM handle in the owning drm_file */
};
72 :
#ifdef notyet
/*
 * Per-attachment cache stored in dma_buf_attachment::priv by
 * drm_gem_map_attach: remembers the DMA-mapped sg table and the
 * direction it was mapped with so repeat maps can be served cheaply.
 */
struct drm_prime_attachment {
	struct sg_table *sgt;		/* cached mapping, NULL until first map */
	enum dma_data_direction dir;	/* DMA_NONE until the first map */
};
#endif
79 :
80 0 : static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
81 : struct dma_buf *dma_buf, uint32_t handle)
82 : {
83 : struct drm_prime_member *member;
84 :
85 0 : member = kmalloc(sizeof(*member), GFP_KERNEL);
86 0 : if (!member)
87 0 : return -ENOMEM;
88 :
89 0 : get_dma_buf(dma_buf);
90 0 : member->dma_buf = dma_buf;
91 0 : member->handle = handle;
92 0 : list_add(&member->entry, &prime_fpriv->head);
93 0 : return 0;
94 0 : }
95 :
96 0 : static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
97 : uint32_t handle)
98 : {
99 : struct drm_prime_member *member;
100 :
101 0 : list_for_each_entry(member, &prime_fpriv->head, entry) {
102 0 : if (member->handle == handle)
103 0 : return member->dma_buf;
104 : }
105 :
106 0 : return NULL;
107 0 : }
108 :
109 0 : static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
110 : struct dma_buf *dma_buf,
111 : uint32_t *handle)
112 : {
113 : struct drm_prime_member *member;
114 :
115 0 : list_for_each_entry(member, &prime_fpriv->head, entry) {
116 0 : if (member->dma_buf == dma_buf) {
117 0 : *handle = member->handle;
118 0 : return 0;
119 : }
120 : }
121 0 : return -ENOENT;
122 0 : }
123 :
124 : #ifdef notyet
125 :
/*
 * dma_buf attach callback: allocate the per-attachment mapping cache
 * and let the driver pin the backing storage if it provides a hook.
 * Returns 0 on success or a negative errno.
 */
static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	/* DMA_NONE marks "no cached sg mapping yet" for drm_gem_map_dma_buf */
	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	/* pinning is optional; drivers without the hook need no further setup */
	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}
146 :
/*
 * dma_buf detach callback: undo drm_gem_map_attach. Unpins the backing
 * storage and tears down any sg mapping cached by drm_gem_map_dma_buf.
 */
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	/* attach may have failed before the cache was allocated */
	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		/* dir == DMA_NONE means the table was never DMA-mapped */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	/* kfree(NULL) is a no-op, so this is safe when sgt was never set */
	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}
173 :
174 : #endif
175 :
/*
 * Remove every cache entry for @dma_buf from the per-file handle cache,
 * dropping the reference each entry held on the buffer. Caller must
 * hold prime_fpriv->lock (hence the _locked suffix).
 */
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			/* drop the ref taken in drm_prime_add_buf_handle */
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}
189 :
190 : #ifdef notyet
191 :
/*
 * dma_buf map callback: obtain the object's scatter/gather table from
 * the driver and DMA-map it in the requested direction. The mapping is
 * cached in the attachment, so repeated maps in the same direction are
 * served from the cache; remapping in a different direction is refused.
 * Returns the sg table or an ERR_PTR.
 */
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			/* cache for later maps and for teardown in map_detach */
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
228 :
/*
 * dma_buf unmap callback: intentionally a no-op. The mapping stays
 * cached in the attachment and is only torn down in drm_gem_map_detach.
 */
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}
235 :
236 : #endif
237 :
238 : /**
239 : * drm_gem_dmabuf_release - dma_buf release implementation for GEM
240 : * @dma_buf: buffer to be released
241 : *
242 : * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
243 : * must use this in their dma_buf ops structure as the release callback.
244 : */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	/* the exporting GEM object was stashed in priv at export time */
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
253 :
254 : #ifdef notyet
255 :
/* dma_buf vmap callback: delegate to the driver's gem_prime_vmap hook */
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}
263 :
/* dma_buf vunmap callback: delegate to the driver's gem_prime_vunmap hook */
static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}
271 :
/* kmap_atomic is not supported for PRIME buffers; always returns NULL */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}
277 :
/* no-op: kmap_atomic above never hands out a mapping to undo */
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}
/* kmap is not supported for PRIME buffers; always returns NULL */
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}
288 :
/* no-op: kmap above never hands out a mapping to undo */
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}
294 :
/*
 * dma_buf mmap callback: forward to the driver's gem_prime_mmap hook,
 * or fail with -ENOSYS when the driver cannot map exported buffers
 * into userspace.
 */
static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
306 :
307 : #endif
308 :
/*
 * dma_buf_ops used for every GEM-backed PRIME export. Only .release is
 * wired up on this platform; the remaining callbacks are compiled out
 * until the required infrastructure is ported (#ifdef notyet).
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
#ifdef notyet
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
#endif
	.release = drm_gem_dmabuf_release,
#ifdef notyet
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
#endif
};
327 :
328 : /**
329 : * DOC: PRIME Helpers
330 : *
331 : * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
332 : * simpler APIs by using the helper functions @drm_gem_prime_export and
333 : * @drm_gem_prime_import. These functions implement dma-buf support in terms of
334 : * six lower-level driver callbacks:
335 : *
336 : * Export callbacks:
337 : *
338 : * - @gem_prime_pin (optional): prepare a GEM object for exporting
339 : *
340 : * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
341 : *
342 : * - @gem_prime_vmap: vmap a buffer exported by your driver
343 : *
344 : * - @gem_prime_vunmap: vunmap a buffer exported by your driver
345 : *
346 : * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
347 : *
348 : * Import callback:
349 : *
350 : * - @gem_prime_import_sg_table (import): produce a GEM object from another
351 : * driver's scatter/gather table
352 : */
353 :
354 : /**
355 : * drm_gem_prime_export - helper library implementation of the export callback
356 : * @dev: drm_device to export from
357 : * @obj: GEM object to export
358 : * @flags: flags like DRM_CLOEXEC
359 : *
360 : * This is the implementation of the gem_prime_export functions for GEM drivers
361 : * using the PRIME helpers.
362 : */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
#ifdef __linux__
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
#endif
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		/* release callback recovers the GEM object from priv */
		.priv = obj,
	};

#ifdef notyet
	/* let the driver share its reservation object with the dma-buf */
	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);
#endif

	return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
386 :
/*
 * Export @obj through the driver's gem_prime_export hook and cache the
 * resulting dma-buf in obj->dma_buf for later re-export. Caller must
 * hold dev->object_name_lock. Returns the dma-buf or an ERR_PTR.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* export failed; the caller drops its own object reference */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new object ref since the dma-buf now holds one too */
	drm_gem_object_reference(obj);

	return dmabuf;
}
419 :
420 : /**
421 : * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
422 : * @dev: dev to export the buffer from
423 : * @file_priv: drm file-private structure
424 : * @handle: buffer handle to export
425 : * @flags: flags like DRM_CLOEXEC
426 : * @prime_fd: pointer to storage for the fd id of the create dma-buf
427 : *
428 : * This is the PRIME export function which must be used mandatorily by GEM
429 : * drivers to ensure correct lifetime management of the underlying GEM object.
430 : * The actual exporting from GEM object to a dma-buf is done through the
431 : * gem_prime_export driver callback.
432 : */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* fast path: this handle was exported before; reuse the cached dma-buf */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
#ifdef notyet
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}
#endif

	/* object already exported (possibly via another file): share its dma-buf */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	/* dma_buf_fd returns the new fd (>= 0) or a negative errno */
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
521 :
522 : /**
523 : * drm_gem_prime_import - helper library implementation of the import callback
524 : * @dev: drm_device to import into
525 : * @dma_buf: dma-buf object to import
526 : *
527 : * This is the implementation of the gem_prime_import functions for GEM drivers
528 : * using the PRIME helpers.
529 : */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
#ifdef notyet
	struct sg_table *sgt;
#endif
	struct drm_gem_object *obj;
	int ret;

	/* self-import: the dma-buf was exported from a GEM object of ours */
	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

#ifdef notyet
	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);
#endif

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#ifdef notyet
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
#else
	/* cross-device import is not wired up on this platform yet */
	ret = 0;
	panic(__func__);
#endif
}
EXPORT_SYMBOL(drm_gem_prime_import);
593 :
594 : /**
595 : * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
596 : * @dev: dev to export the buffer from
597 : * @file_priv: drm file-private structure
598 : * @prime_fd: fd id of the dma-buf which should be imported
599 : * @handle: pointer to storage for the handle of the imported buffer object
600 : *
601 : * This is the PRIME import function which must be used mandatorily by GEM
602 : * drivers to ensure correct lifetime management of the underlying GEM object.
603 : * The actual importing of GEM object from the dma-buf is done through the
604 : * gem_import_export driver callback.
605 : */
606 0 : int drm_gem_prime_fd_to_handle(struct drm_device *dev,
607 : struct drm_file *file_priv, int prime_fd,
608 : uint32_t *handle)
609 : {
610 : struct dma_buf *dma_buf;
611 : struct drm_gem_object *obj;
612 : int ret;
613 :
614 0 : dma_buf = dma_buf_get(prime_fd);
615 0 : if (IS_ERR(dma_buf))
616 0 : return PTR_ERR(dma_buf);
617 :
618 0 : mutex_lock(&file_priv->prime.lock);
619 :
620 0 : ret = drm_prime_lookup_buf_handle(&file_priv->prime,
621 : dma_buf, handle);
622 0 : if (ret == 0)
623 : goto out_put;
624 :
625 : /* never seen this one, need to import */
626 0 : mutex_lock(&dev->object_name_lock);
627 0 : obj = dev->driver->gem_prime_import(dev, dma_buf);
628 0 : if (IS_ERR(obj)) {
629 0 : ret = PTR_ERR(obj);
630 0 : goto out_unlock;
631 : }
632 :
633 0 : if (obj->dma_buf) {
634 0 : WARN_ON(obj->dma_buf != dma_buf);
635 0 : } else {
636 0 : obj->dma_buf = dma_buf;
637 0 : get_dma_buf(dma_buf);
638 : }
639 :
640 : /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
641 0 : ret = drm_gem_handle_create_tail(file_priv, obj, handle);
642 0 : drm_gem_object_unreference_unlocked(obj);
643 0 : if (ret)
644 : goto out_put;
645 :
646 0 : ret = drm_prime_add_buf_handle(&file_priv->prime,
647 0 : dma_buf, *handle);
648 0 : if (ret)
649 : goto fail;
650 :
651 0 : mutex_unlock(&file_priv->prime.lock);
652 :
653 0 : dma_buf_put(dma_buf);
654 :
655 0 : return 0;
656 :
657 : fail:
658 : /* hmm, if driver attached, we are relying on the free-object path
659 : * to detach.. which seems ok..
660 : */
661 0 : drm_gem_handle_delete(file_priv, *handle);
662 : out_unlock:
663 0 : mutex_unlock(&dev->object_name_lock);
664 : out_put:
665 0 : dma_buf_put(dma_buf);
666 0 : mutex_unlock(&file_priv->prime.lock);
667 0 : return ret;
668 0 : }
669 : EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
670 :
671 0 : int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
672 : struct drm_file *file_priv)
673 : {
674 0 : struct drm_prime_handle *args = data;
675 : uint32_t flags;
676 :
677 0 : if (!drm_core_check_feature(dev, DRIVER_PRIME))
678 0 : return -EINVAL;
679 :
680 0 : if (!dev->driver->prime_handle_to_fd)
681 0 : return -ENOSYS;
682 :
683 : /* check flags are valid */
684 0 : if (args->flags & ~DRM_CLOEXEC)
685 0 : return -EINVAL;
686 :
687 : /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
688 0 : flags = args->flags & DRM_CLOEXEC;
689 :
690 0 : return dev->driver->prime_handle_to_fd(dev, file_priv,
691 0 : args->handle, flags, &args->fd);
692 0 : }
693 :
694 0 : int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
695 : struct drm_file *file_priv)
696 : {
697 0 : struct drm_prime_handle *args = data;
698 :
699 0 : if (!drm_core_check_feature(dev, DRIVER_PRIME))
700 0 : return -EINVAL;
701 :
702 0 : if (!dev->driver->prime_fd_to_handle)
703 0 : return -ENOSYS;
704 :
705 0 : return dev->driver->prime_fd_to_handle(dev, file_priv,
706 0 : args->fd, &args->handle);
707 0 : }
708 :
709 : #ifdef notyet
710 :
711 : /**
712 : * drm_prime_pages_to_sg - converts a page array into an sg list
713 : * @pages: pointer to the array of page pointers to convert
714 : * @nr_pages: length of the page vector
715 : *
716 : * This helper creates an sg table object from a set of pages
717 : * the driver is responsible for mapping the pages into the
718 : * importers address space for use with dma_buf itself.
719 : */
720 : struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
721 : {
722 : struct sg_table *sg = NULL;
723 : int ret;
724 :
725 : sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
726 : if (!sg) {
727 : ret = -ENOMEM;
728 : goto out;
729 : }
730 :
731 : ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
732 : nr_pages << PAGE_SHIFT, GFP_KERNEL);
733 : if (ret)
734 : goto out;
735 :
736 : return sg;
737 : out:
738 : kfree(sg);
739 : return ERR_PTR(ret);
740 : }
741 : EXPORT_SYMBOL(drm_prime_pages_to_sg);
742 :
743 : /**
744 : * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
745 : * @sgt: scatter-gather table to convert
746 : * @pages: array of page pointers to store the page array in
747 : * @addrs: optional array to store the dma bus address of each page
748 : * @max_pages: size of both the passed-in arrays
749 : *
750 : * Exports an sg table into an array of pages and addresses. This is currently
751 : * required by the TTM driver in order to do correct fault handling.
752 : */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		/* expand each (possibly multi-page) segment into single pages */
		while (len > 0) {
			/* returns -1 (not an errno) when the arrays are too small */
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
785 :
786 : /**
787 : * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
788 : * @obj: GEM object which was created from a dma-buf
789 : * @sg: the sg-table which was pinned at import time
790 : *
791 : * This is the cleanup functions which GEM drivers need to call when they use
792 : * @drm_gem_prime_import to import dma-bufs.
793 : */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	/* sg may be NULL if the buffer was never mapped */
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	/* grab the dma-buf before detach frees the attachment */
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
807 :
808 : #endif
809 :
/*
 * Initialize the per-file PRIME state: an empty handle cache plus the
 * lock protecting it. Called when a DRM file is opened.
 */
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	rw_init(&prime_fpriv->lock, "primlk");
}
815 :
/*
 * Tear down the per-file PRIME state. A non-empty cache here means a
 * handle was leaked somewhere before the file was closed.
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
|