/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <dev/pci/drm/drmP.h>
#include "i915_drv.h"
#ifdef __linux__
#include <linux/dma-buf.h>
#endif

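/*
 * Recover the GEM object backing a dma-buf. The exporter stores the GEM
 * object pointer in the dma-buf's priv field (see i915_gem_prime_export()
 * below), so this is just a cast back through to_intel_bo().
 */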
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

#ifdef notyet

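/*
 * Map the object's backing pages for DMA by an importing device: pin the
 * pages so they cannot be moved, duplicate the object's scatter/gather
 * table so the importer gets an independent mapping, then DMA-map the copy
 * for the attaching device.
 */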
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

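/*
 * Tear down a mapping created by i915_gem_map_dma_buf(): DMA-unmap the
 * copied scatterlist, free it, and drop the pin taken on the backing pages.
 */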
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

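/*
 * Provide a contiguous kernel virtual mapping of the object for the
 * importer. The mapping is built once from the object's pages and
 * refcounted via vmapping_count; repeat callers get the cached address.
 */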
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

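/*
 * Drop one reference on the vmap; when the last user goes away, unmap the
 * kernel virtual mapping and unpin the backing pages.
 */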
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

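/*
 * Per-page CPU kmap access is not implemented; importers needing CPU
 * access must go through the vmap interface above instead.
 */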
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

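/* Mapping an exported buffer into userspace is not supported. */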
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

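/*
 * Prepare the buffer for CPU access by moving the object to the CPU
 * domain. Write access is requested when the caller intends to modify the
 * buffer (DMA_BIDIRECTIONAL or DMA_TO_DEVICE).
 */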
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

#endif

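/*
 * dma-buf operation table handed to dma_buf_export(). On this port only
 * release is wired up; the remaining callbacks stay compiled out until the
 * code guarded by "notyet" is enabled.
 */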
static const struct dma_buf_ops i915_dmabuf_ops = {
#ifdef notyet
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
#endif
	.release = drm_gem_dmabuf_release,
#ifdef notyet
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
#endif
};

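/*
 * Export a GEM object as a dma-buf. The GEM object pointer is stashed in
 * exp_info.priv, which is what dma_buf_to_obj() relies on. An object type
 * may veto or prepare for export through its dmabuf_export hook. Userspace
 * typically reaches this path via DRM_IOCTL_PRIME_HANDLE_TO_FD.
 */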
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}

#ifdef notyet

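/*
 * Page provider for imported (foreign) dma-bufs: the backing pages come
 * from the exporter via dma_buf_map_attachment() rather than from the
 * object's own backing store, and are returned with
 * dma_buf_unmap_attachment().
 */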
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

#endif

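/*
 * Import a dma-buf as a GEM object. A self-import (a dma-buf we exported
 * ourselves on this device) short-circuits to the underlying GEM object;
 * a foreign buffer is attached and wrapped in a new GEM object backed by
 * the dmabuf page provider above. The foreign path is still compiled out
 * on this port, so reaching it panics.
 */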
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

#ifdef notyet
	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
#else
	ret = 0;
	panic(__func__);
#endif
}