/* $OpenBSD: drm_vma_manager.c,v 1.3 2017/07/01 16:14:10 kettenis Exp $ */
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_mm.h"
#include "drm_vma_manager.h"
#include "drm_linux_rbtree.h"
/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately and to
 * avoid confusing mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return value (with the exception of drm_vma_node_offset_addr()) is
 * given in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */
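
/*
 * A minimal usage sketch (illustrative only; struct my_dev, struct my_obj
 * and my_obj_map_setup() are hypothetical driver types and functions, not
 * part of this file). A driver typically embeds one manager per device and
 * one node per mappable object:
 *
 *	struct my_dev {
 *		struct drm_vma_offset_manager vma_manager;
 *	};
 *
 *	struct my_obj {
 *		struct drm_vma_offset_node vma_node;
 *		unsigned long num_pages;
 *		__u64 map_offset;
 *	};
 *
 *	int my_obj_map_setup(struct my_dev *dev, struct my_obj *obj)
 *	{
 *		// all sizes and offsets are in pages, not bytes
 *		int ret = drm_vma_offset_add(&dev->vma_manager,
 *					     &obj->vma_node, obj->num_pages);
 *		if (ret)
 *			return ret;
 *
 *		// byte-based offset that user-space passes to mmap()
 *		obj->map_offset = drm_vma_node_offset_addr(&obj->vma_node);
 *		return 0;
 *	}
 */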

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are
 * left to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rw_init(&mgr->vm_lock, "drmvmo");
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);
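
/*
 * Illustrative sketch of a typical init call. MY_MMAP_BASE and MY_MMAP_SIZE
 * are made-up byte-based constants; the manager itself wants page numbers,
 * hence the PAGE_SHIFT conversion:
 *
 *	#define MY_MMAP_BASE	0x100000000ULL
 *	#define MY_MMAP_SIZE	0x100000000ULL
 *
 *	drm_vma_offset_manager_init(&dev->vma_manager,
 *				    MY_MMAP_BASE >> PAGE_SHIFT,
 *				    MY_MMAP_SIZE >> PAGE_SHIFT);
 */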

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given range. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);
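
/*
 * Illustrative sketch: resolving a faulting mmap range back to its object in
 * a hypothetical driver mmap path (struct my_obj as in the sketch above; a
 * real driver must also take a reference before using the object, see
 * drm_vma_offset_lookup_locked() below):
 *
 *	struct drm_vma_offset_node *node;
 *	struct my_obj *obj;
 *
 *	node = drm_vma_offset_lookup(&dev->vma_manager, vma->vm_pgoff,
 *				     (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
 *	if (!node)
 *		return -EINVAL;
 *	obj = container_of(node, struct my_obj, vma_node);
 */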

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset
 * lookup manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
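
/*
 * Illustrative sketch of the manual-locking pattern this helper is meant
 * for, modeled on the example in drm_vma_manager.h; the object lookup and
 * ref-count (struct my_obj with an embedded struct kref named "ref") are
 * assumptions:
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *	if (node)
 *		kref_get_unless_zero(&container_of(node, struct my_obj,
 *						   vma_node)->ref);
 *	drm_vma_offset_unlock_lookup(mgr);
 */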

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(); no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
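
/*
 * Illustrative sketch: drm_vma_offset_remove() pairs with
 * drm_vma_offset_add() in the object's teardown path (my_obj_free() is a
 * hypothetical destructor):
 *
 *	void my_obj_free(struct my_dev *dev, struct my_obj *obj)
 *	{
 *		// safe even if the offset was never (successfully) added
 *		drm_vma_offset_remove(&dev->vma_manager, &obj->vma_node);
 *		kfree(obj);
 *	}
 */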

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is
 * currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the
	 * entry is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);
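
/*
 * Illustrative sketch: granting access when user-space gets a handle to the
 * object and revoking it when the handle is closed (both hooks are
 * hypothetical; see drm_vma_node_revoke() below for the counterpart):
 *
 *	int my_obj_handle_create(struct my_obj *obj, struct file *filp)
 *	{
 *		return drm_vma_node_allow(&obj->vma_node, filp);
 *	}
 *
 *	void my_obj_handle_close(struct my_obj *obj, struct file *filp)
 *	{
 *		// must balance every successful drm_vma_node_allow()
 *		drm_vma_node_revoke(&obj->vma_node, filp);
 *	}
 */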

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node for @filp to check whether it is currently on the
 * list of allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	/* a non-NULL iter means @filp was found */
	return iter != NULL;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
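
/*
 * Illustrative sketch: the access check a hypothetical mmap handler would
 * perform before mapping @obj (drm_vma_manager.h also provides the
 * drm_vma_node_verify_access() convenience wrapper around this test):
 *
 *	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
 *		return -EACCES;
 */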