1 : /**************************************************************************
2 : *
3 : * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4 : * All Rights Reserved.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the
8 : * "Software"), to deal in the Software without restriction, including
9 : * without limitation the rights to use, copy, modify, merge, publish,
10 : * distribute, sub license, and/or sell copies of the Software, and to
11 : * permit persons to whom the Software is furnished to do so, subject to
12 : * the following conditions:
13 : *
14 : * The above copyright notice and this permission notice (including the
15 : * next paragraph) shall be included in all copies or substantial portions
16 : * of the Software.
17 : *
18 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 : * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 : * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 : * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 : * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 : * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 : *
26 : *
27 : **************************************************************************/
28 :
29 : /*
30 : * Generic simple memory manager implementation. Intended to be used as a base
31 : * class implementation for more advanced memory managers.
32 : *
33 : * Note that the algorithm used is quite simple and there might be substantial
34 : * performance gains if a smarter free list is implemented. Currently it is just an
35 : * unordered stack of free regions. This could easily be improved by using an
36 : * RB-tree instead, at least if we expect heavy fragmentation.
37 : *
38 : * Aligned allocations can also see improvement.
39 : *
40 : * Authors:
41 : * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42 : */
43 :
44 : #include <dev/pci/drm/drmP.h>
45 : #include <dev/pci/drm/drm_mm.h>
46 : #ifdef __linux__
47 : #include <linux/slab.h>
48 : #include <linux/seq_file.h>
49 : #include <linux/export.h>
50 : #endif
51 :
52 : /**
53 : * DOC: Overview
54 : *
55 : * drm_mm provides a simple range allocator. Drivers are free to use the
56 : * resource allocator from the Linux core if it suits them; the upside of drm_mm
57 : * is that it lives in the DRM core, which makes it easier to extend for
58 : * some of the crazier special-purpose needs of GPUs.
59 : *
60 : * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
61 : * Drivers are free to embed either of them into their own suitable
62 : * datastructures. drm_mm itself will not do any allocations of its own, so if
63 : * drivers choose not to embed nodes they still need to allocate them
64 : * themselves.
65 : *
66 : * The range allocator also supports reservation of preallocated blocks. This is
67 : * useful for taking over initial mode setting configurations from the firmware,
68 : * where an object needs to be created which exactly matches the firmware's
69 : * scanout target. As long as the range is still free it can be inserted anytime
70 : * after the allocator is initialized, which helps with avoiding looped
71 : * dependencies in the driver load sequence.
72 : *
73 : * drm_mm maintains a stack of most recently freed holes, which of all
74 : * simplistic datastructures seems to be a fairly decent approach to clustering
75 : * allocations and avoiding too much fragmentation. This means free space
76 : * searches are O(num_holes). Given all the fancy features drm_mm supports,
77 : * something better would be fairly complex, and since gfx thrashing is a fairly
78 : * steep cliff this is not a real concern. Removing a node again is O(1).
79 : *
80 : * drm_mm supports a few features: Alignment and range restrictions can be
81 : * supplied. Furthermore, every &drm_mm_node has a color value (which is just an
82 : * opaque unsigned long) which in conjunction with a driver callback can be used
83 : * to implement sophisticated placement restrictions. The i915 DRM driver uses
84 : * this to implement guard pages between incompatible caching domains in the
85 : * graphics TT.
86 : *
87 : * Two behaviors are supported for searching and allocating: bottom-up and top-down.
88 : * The default is bottom-up. Top-down allocation can be used if the memory area
89 : * has different restrictions, or just to reduce fragmentation.
90 : *
91 : * Finally, iteration helpers to walk all nodes and all holes are provided, as are
92 : * some basic allocator dumpers for debugging.
93 : */
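/*
 * Illustrative sketch (not part of the original file): a minimal use of the
 * allocator as described above. The function name and the 16 MiB/4 KiB sizes
 * are made up, and the DRM_MM_SEARCH_DEFAULT/DRM_MM_CREATE_DEFAULT flags are
 * assumed to be provided by drm_mm.h.
 */
static int example_drm_mm_usage(void)
{
	struct drm_mm mm;
	struct drm_mm_node node;
	int ret;

	/* Both the allocator and any node must be cleared to 0 before use. */
	memset(&mm, 0, sizeof(mm));
	memset(&node, 0, sizeof(node));

	/* Manage the range [0, 16 MiB). */
	drm_mm_init(&mm, 0, 16 * 1024 * 1024);

	/* Ask for 4 KiB, aligned to 4 KiB, with color 0. */
	ret = drm_mm_insert_node_generic(&mm, &node, 4096, 4096, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret)	/* -ENOSPC when no hole fits */
		return ret;

	/* node.start now holds the assigned offset; use it, then free it. */
	drm_mm_remove_node(&node);

	/* Takedown is only legal once the allocator is empty again. */
	drm_mm_takedown(&mm);
	return 0;
}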
94 :
95 : static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
96 : u64 size,
97 : unsigned alignment,
98 : unsigned long color,
99 : enum drm_mm_search_flags flags);
100 : static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
101 : u64 size,
102 : unsigned alignment,
103 : unsigned long color,
104 : u64 start,
105 : u64 end,
106 : enum drm_mm_search_flags flags);
107 :
108 0 : static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
109 : struct drm_mm_node *node,
110 : u64 size, unsigned alignment,
111 : unsigned long color,
112 : enum drm_mm_allocator_flags flags)
113 : {
114 0 : struct drm_mm *mm = hole_node->mm;
115 0 : u64 hole_start = drm_mm_hole_node_start(hole_node);
116 0 : u64 hole_end = drm_mm_hole_node_end(hole_node);
117 0 : u64 adj_start = hole_start;
118 0 : u64 adj_end = hole_end;
119 :
120 0 : BUG_ON(node->allocated);
121 :
122 0 : if (mm->color_adjust)
123 0 : mm->color_adjust(hole_node, color, &adj_start, &adj_end);
124 :
125 0 : if (flags & DRM_MM_CREATE_TOP)
126 0 : adj_start = adj_end - size;
127 :
128 0 : if (alignment) {
129 0 : u64 tmp = adj_start;
130 : unsigned rem;
131 :
132 0 : rem = do_div(tmp, alignment);
133 0 : if (rem) {
134 0 : if (flags & DRM_MM_CREATE_TOP)
135 0 : adj_start -= rem;
136 : else
137 0 : adj_start += alignment - rem;
138 : }
139 0 : }
140 :
141 0 : BUG_ON(adj_start < hole_start);
142 0 : BUG_ON(adj_end > hole_end);
143 :
144 0 : if (adj_start == hole_start) {
145 0 : hole_node->hole_follows = 0;
146 0 : list_del(&hole_node->hole_stack);
147 0 : }
148 :
149 0 : node->start = adj_start;
150 0 : node->size = size;
151 0 : node->mm = mm;
152 0 : node->color = color;
153 0 : node->allocated = 1;
154 :
155 0 : INIT_LIST_HEAD(&node->hole_stack);
156 0 : list_add(&node->node_list, &hole_node->node_list);
157 :
158 0 : BUG_ON(node->start + node->size > adj_end);
159 :
160 0 : node->hole_follows = 0;
161 0 : if (__drm_mm_hole_node_start(node) < hole_end) {
162 0 : list_add(&node->hole_stack, &mm->hole_stack);
163 0 : node->hole_follows = 1;
164 0 : }
165 0 : }
166 :
167 : /**
168 : * drm_mm_reserve_node - insert a pre-initialized node
169 : * @mm: drm_mm allocator to insert @node into
170 : * @node: drm_mm_node to insert
171 : *
172 : * This function inserts an already set-up drm_mm_node into the allocator,
173 : * meaning that start, size and color must be set by the caller. This is useful
174 : * to initialize the allocator with preallocated objects which must be set-up
175 : * before the range allocator can be set-up, e.g. when taking over a firmware
176 : * framebuffer.
177 : *
178 : * Returns:
179 : * 0 on success, -ENOSPC if there's no hole where @node is.
180 : */
181 0 : int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
182 : {
183 : struct drm_mm_node *hole;
184 : u64 end;
185 : u64 hole_start;
186 : u64 hole_end;
187 :
188 0 : BUG_ON(node == NULL);
189 :
190 0 : end = node->start + node->size;
191 :
192 : /* Find the relevant hole to add our node to */
193 0 : drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
194 0 : if (hole_start > node->start || hole_end < end)
195 : continue;
196 :
197 0 : node->mm = mm;
198 0 : node->allocated = 1;
199 :
200 0 : INIT_LIST_HEAD(&node->hole_stack);
201 0 : list_add(&node->node_list, &hole->node_list);
202 :
203 0 : if (node->start == hole_start) {
204 0 : hole->hole_follows = 0;
205 0 : list_del_init(&hole->hole_stack);
206 0 : }
207 :
208 0 : node->hole_follows = 0;
209 0 : if (end != hole_end) {
210 0 : list_add(&node->hole_stack, &mm->hole_stack);
211 0 : node->hole_follows = 1;
212 0 : }
213 :
214 0 : return 0;
215 : }
216 :
217 0 : return -ENOSPC;
218 0 : }
219 : EXPORT_SYMBOL(drm_mm_reserve_node);
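/*
 * Illustrative sketch (not part of the original file): reserving a
 * pre-defined range such as a firmware-configured scanout buffer, as the
 * kernel-doc above describes. The function name and the 1 MiB/8 MiB values
 * are made up; only the drm_mm_reserve_node() call reflects this file.
 */
static int example_reserve_firmware_fb(struct drm_mm *mm,
				       struct drm_mm_node *node)
{
	memset(node, 0, sizeof(*node));
	node->start = 1 * 1024 * 1024;	/* hypothetical firmware offset */
	node->size  = 8 * 1024 * 1024;	/* hypothetical framebuffer size */
	node->color = 0;

	/* Fails with -ENOSPC unless the range lies entirely inside a hole. */
	return drm_mm_reserve_node(mm, node);
}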
220 :
221 : /**
222 : * drm_mm_insert_node_generic - search for space and insert @node
223 : * @mm: drm_mm to allocate from
224 : * @node: preallocate node to insert
225 : * @size: size of the allocation
226 : * @alignment: alignment of the allocation
227 : * @color: opaque tag value to use for this node
228 : * @sflags: flags to fine-tune the allocation search
229 : * @aflags: flags to fine-tune the allocation behavior
230 : *
231 : * The preallocated node must be cleared to 0.
232 : *
233 : * Returns:
234 : * 0 on success, -ENOSPC if there's no suitable hole.
235 : */
236 0 : int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
237 : u64 size, unsigned alignment,
238 : unsigned long color,
239 : enum drm_mm_search_flags sflags,
240 : enum drm_mm_allocator_flags aflags)
241 : {
242 : struct drm_mm_node *hole_node;
243 :
244 0 : hole_node = drm_mm_search_free_generic(mm, size, alignment,
245 : color, sflags);
246 0 : if (!hole_node)
247 0 : return -ENOSPC;
248 :
249 0 : drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
250 0 : return 0;
251 0 : }
252 : EXPORT_SYMBOL(drm_mm_insert_node_generic);
253 :
254 0 : static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
255 : struct drm_mm_node *node,
256 : u64 size, unsigned alignment,
257 : unsigned long color,
258 : u64 start, u64 end,
259 : enum drm_mm_allocator_flags flags)
260 : {
261 0 : struct drm_mm *mm = hole_node->mm;
262 0 : u64 hole_start = drm_mm_hole_node_start(hole_node);
263 0 : u64 hole_end = drm_mm_hole_node_end(hole_node);
264 0 : u64 adj_start = hole_start;
265 0 : u64 adj_end = hole_end;
266 :
267 0 : BUG_ON(!hole_node->hole_follows || node->allocated);
268 :
269 0 : if (adj_start < start)
270 0 : adj_start = start;
271 0 : if (adj_end > end)
272 0 : adj_end = end;
273 :
274 0 : if (mm->color_adjust)
275 0 : mm->color_adjust(hole_node, color, &adj_start, &adj_end);
276 :
277 0 : if (flags & DRM_MM_CREATE_TOP)
278 0 : adj_start = adj_end - size;
279 :
280 0 : if (alignment) {
281 0 : u64 tmp = adj_start;
282 : unsigned rem;
283 :
284 0 : rem = do_div(tmp, alignment);
285 0 : if (rem) {
286 0 : if (flags & DRM_MM_CREATE_TOP)
287 0 : adj_start -= rem;
288 : else
289 0 : adj_start += alignment - rem;
290 : }
291 0 : }
292 :
293 0 : if (adj_start == hole_start) {
294 0 : hole_node->hole_follows = 0;
295 0 : list_del(&hole_node->hole_stack);
296 0 : }
297 :
298 0 : node->start = adj_start;
299 0 : node->size = size;
300 0 : node->mm = mm;
301 0 : node->color = color;
302 0 : node->allocated = 1;
303 :
304 0 : INIT_LIST_HEAD(&node->hole_stack);
305 0 : list_add(&node->node_list, &hole_node->node_list);
306 :
307 0 : BUG_ON(node->start < start);
308 0 : BUG_ON(node->start < adj_start);
309 0 : BUG_ON(node->start + node->size > adj_end);
310 0 : BUG_ON(node->start + node->size > end);
311 :
312 0 : node->hole_follows = 0;
313 0 : if (__drm_mm_hole_node_start(node) < hole_end) {
314 0 : list_add(&node->hole_stack, &mm->hole_stack);
315 0 : node->hole_follows = 1;
316 0 : }
317 0 : }
318 :
319 : /**
320 : * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
321 : * @mm: drm_mm to allocate from
322 : * @node: preallocate node to insert
323 : * @size: size of the allocation
324 : * @alignment: alignment of the allocation
325 : * @color: opaque tag value to use for this node
326 : * @start: start of the allowed range for this node
327 : * @end: end of the allowed range for this node
328 : * @sflags: flags to fine-tune the allocation search
329 : * @aflags: flags to fine-tune the allocation behavior
330 : *
331 : * The preallocated node must be cleared to 0.
332 : *
333 : * Returns:
334 : * 0 on success, -ENOSPC if there's no suitable hole.
335 : */
336 0 : int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
337 : u64 size, unsigned alignment,
338 : unsigned long color,
339 : u64 start, u64 end,
340 : enum drm_mm_search_flags sflags,
341 : enum drm_mm_allocator_flags aflags)
342 : {
343 : struct drm_mm_node *hole_node;
344 :
345 0 : hole_node = drm_mm_search_free_in_range_generic(mm,
346 : size, alignment, color,
347 : start, end, sflags);
348 0 : if (!hole_node)
349 0 : return -ENOSPC;
350 :
351 0 : drm_mm_insert_helper_range(hole_node, node,
352 : size, alignment, color,
353 : start, end, aflags);
354 0 : return 0;
355 0 : }
356 : EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
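/*
 * Illustrative sketch (not part of the original file): a range-restricted
 * allocation. The function name and the 256 MiB "mappable" limit are made
 * up; the search/allocator flags are assumed to come from drm_mm.h.
 */
static int example_insert_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				   u64 size)
{
	const u64 mappable_end = 256ULL * 1024 * 1024;

	memset(node, 0, sizeof(*node));

	/* Only holes fully inside [0, mappable_end) are considered. */
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}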
357 :
358 : /**
359 : * drm_mm_remove_node - Remove a memory node from the allocator.
360 : * @node: drm_mm_node to remove
361 : *
362 : * This just removes a node from its drm_mm allocator. The node does not need to
363 : * be cleared again before it can be re-inserted into this or any other drm_mm
364 : * allocator. It is a bug to call this function on an unallocated node.
365 : */
366 0 : void drm_mm_remove_node(struct drm_mm_node *node)
367 : {
368 0 : struct drm_mm *mm = node->mm;
369 : struct drm_mm_node *prev_node;
370 :
371 0 : if (WARN_ON(!node->allocated))
372 0 : return;
373 :
374 0 : BUG_ON(node->scanned_block || node->scanned_prev_free
375 : || node->scanned_next_free);
376 :
377 : prev_node =
378 0 : list_entry(node->node_list.prev, struct drm_mm_node, node_list);
379 :
380 0 : if (node->hole_follows) {
381 0 : BUG_ON(__drm_mm_hole_node_start(node) ==
382 : __drm_mm_hole_node_end(node));
383 0 : list_del(&node->hole_stack);
384 0 : } else
385 0 : BUG_ON(__drm_mm_hole_node_start(node) !=
386 : __drm_mm_hole_node_end(node));
387 :
388 :
389 0 : if (!prev_node->hole_follows) {
390 0 : prev_node->hole_follows = 1;
391 0 : list_add(&prev_node->hole_stack, &mm->hole_stack);
392 0 : } else
393 0 : list_move(&prev_node->hole_stack, &mm->hole_stack);
394 :
395 0 : list_del(&node->node_list);
396 0 : node->allocated = 0;
397 0 : }
398 : EXPORT_SYMBOL(drm_mm_remove_node);
399 :
400 0 : static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
401 : {
402 0 : if (end - start < size)
403 0 : return 0;
404 :
405 0 : if (alignment) {
406 : u64 tmp = start;
407 : unsigned rem;
408 :
409 0 : rem = do_div(tmp, alignment);
410 0 : if (rem)
411 0 : start += alignment - rem;
412 0 : }
413 :
414 0 : return end >= start + size;
415 0 : }
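/*
 * Worked example for check_free_hole() above (illustration only): with a
 * hole [0x1003, 0x3000), size 0x1000 and alignment 0x1000, do_div() leaves
 * rem = 3, so the candidate start is rounded up by 0x1000 - 3 to 0x2000.
 * Since 0x2000 + 0x1000 <= 0x3000 still holds, the hole is accepted.
 */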
416 :
417 0 : static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
418 : u64 size,
419 : unsigned alignment,
420 : unsigned long color,
421 : enum drm_mm_search_flags flags)
422 : {
423 : struct drm_mm_node *entry;
424 : struct drm_mm_node *best;
425 0 : u64 adj_start;
426 0 : u64 adj_end;
427 : u64 best_size;
428 :
429 0 : BUG_ON(mm->scanned_blocks);
430 :
431 : best = NULL;
432 : best_size = ~0UL;
433 :
434 0 : __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
435 : flags & DRM_MM_SEARCH_BELOW) {
436 0 : u64 hole_size = adj_end - adj_start;
437 :
438 0 : if (mm->color_adjust) {
439 0 : mm->color_adjust(entry, color, &adj_start, &adj_end);
440 0 : if (adj_end <= adj_start)
441 0 : continue;
442 : }
443 :
444 0 : if (!check_free_hole(adj_start, adj_end, size, alignment))
445 0 : continue;
446 :
447 0 : if (!(flags & DRM_MM_SEARCH_BEST))
448 0 : return entry;
449 :
450 0 : if (hole_size < best_size) {
451 : best = entry;
452 : best_size = hole_size;
453 0 : }
454 0 : }
455 :
456 0 : return best;
457 0 : }
458 :
459 0 : static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
460 : u64 size,
461 : unsigned alignment,
462 : unsigned long color,
463 : u64 start,
464 : u64 end,
465 : enum drm_mm_search_flags flags)
466 : {
467 : struct drm_mm_node *entry;
468 : struct drm_mm_node *best;
469 0 : u64 adj_start;
470 0 : u64 adj_end;
471 : u64 best_size;
472 :
473 0 : BUG_ON(mm->scanned_blocks);
474 :
475 : best = NULL;
476 : best_size = ~0UL;
477 :
478 0 : __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
479 : flags & DRM_MM_SEARCH_BELOW) {
480 0 : u64 hole_size = adj_end - adj_start;
481 :
482 0 : if (adj_start < start)
483 0 : adj_start = start;
484 0 : if (adj_end > end)
485 0 : adj_end = end;
486 :
487 0 : if (mm->color_adjust) {
488 0 : mm->color_adjust(entry, color, &adj_start, &adj_end);
489 0 : if (adj_end <= adj_start)
490 0 : continue;
491 : }
492 :
493 0 : if (!check_free_hole(adj_start, adj_end, size, alignment))
494 0 : continue;
495 :
496 0 : if (!(flags & DRM_MM_SEARCH_BEST))
497 0 : return entry;
498 :
499 0 : if (hole_size < best_size) {
500 : best = entry;
501 : best_size = hole_size;
502 0 : }
503 0 : }
504 :
505 0 : return best;
506 0 : }
507 :
508 : /**
509 : * drm_mm_replace_node - move an allocation from @old to @new
510 : * @old: drm_mm_node to remove from the allocator
511 : * @new: drm_mm_node which should inherit @old's allocation
512 : *
513 : * This is useful for when drivers embed the drm_mm_node structure and hence
514 : * can't move allocations by reassigning pointers. It's a combination of remove
515 : * and insert with the guarantee that the allocation start will match.
516 : */
517 0 : void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
518 : {
519 0 : list_replace(&old->node_list, &new->node_list);
520 0 : list_replace(&old->hole_stack, &new->hole_stack);
521 0 : new->hole_follows = old->hole_follows;
522 0 : new->mm = old->mm;
523 0 : new->start = old->start;
524 0 : new->size = old->size;
525 0 : new->color = old->color;
526 :
527 0 : old->allocated = 0;
528 0 : new->allocated = 1;
529 0 : }
530 : EXPORT_SYMBOL(drm_mm_replace_node);
531 :
532 : /**
533 : * DOC: lru scan roster
534 : *
535 : * Very often GPUs need to have continuous allocations for a given object. When
536 : * evicting objects to make space for a new one it is therefore not the most
537 : * efficient to simply select objects from the tail of an LRU
538 : * until there's a suitable hole: especially for big objects or nodes that
539 : * otherwise have special allocation constraints there's a good chance we evict
540 : * lots of (smaller) objects unnecessarily.
541 : *
542 : * The DRM range allocator supports this use-case through the scanning
543 : * interfaces. First a scan operation needs to be initialized with
544 : * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
545 : * objects to the roster (probably by walking an LRU list, but this can be
546 : * freely implemented) until a suitable hole is found or there's no further
547 : * evictable object.
548 : *
549 : * The driver must then walk through all objects again in exactly the reverse
550 : * order to restore the allocator state. Note that while the allocator is used
551 : * in the scan mode no other operation is allowed.
552 : *
553 : * Finally the driver evicts all objects selected in the scan. Adding and
554 : * removing an object is O(1), and since freeing a node is also O(1) the overall
555 : * complexity is O(scanned_objects). So, like the free stack which needs to be
556 : * walked before a scan operation even begins, this is linear in the number of
557 : * objects. It doesn't seem to hurt badly.
558 : */
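/*
 * Illustrative sketch (not part of the original file) of the eviction-scan
 * protocol described above. "struct example_obj", its list members and the
 * function name are made-up stand-ins for whatever a driver actually tracks;
 * only the drm_mm_* calls and the standard list helpers reflect real
 * interfaces.
 */
struct example_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* driver's eviction LRU */
	struct list_head scan_link;	/* temporary scan bookkeeping */
};

static int example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				  u64 size, unsigned alignment)
{
	struct example_obj *obj, *tmp;
	struct list_head scan_list, evict_list;
	bool found = false;

	INIT_LIST_HEAD(&scan_list);
	INIT_LIST_HEAD(&evict_list);
	drm_mm_init_scan(mm, size, alignment, 0);

	/* Phase 1: feed LRU objects to the scanner until a hole appears. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Phase 2: every added block must be removed again, in exactly the
	 * reverse order (scan_list is LIFO). No other drm_mm operation is
	 * allowed until the scan list is drained. Blocks the scanner picked
	 * are set aside for eviction.
	 */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&obj->node))
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* Phase 3: evict the selected blocks, freeing the desired hole. */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		/* A real driver would unbind the object here as well. */
		drm_mm_remove_node(&obj->node);
	}

	return 0;
}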
559 :
560 : /**
561 : * drm_mm_init_scan - initialize lru scanning
562 : * @mm: drm_mm to scan
563 : * @size: size of the allocation
564 : * @alignment: alignment of the allocation
565 : * @color: opaque tag value to use for the allocation
566 : *
567 : * This simply sets up the scanning routines with the parameters for the desired
568 : * hole. Note that there's no need to specify allocation flags, since they only
569 : * change the place a node is allocated from within a suitable hole.
570 : *
571 : * Warning:
572 : * As long as the scan list is non-empty, no other operations than
573 : * adding/removing nodes to/from the scan list are allowed.
574 : */
575 0 : void drm_mm_init_scan(struct drm_mm *mm,
576 : u64 size,
577 : unsigned alignment,
578 : unsigned long color)
579 : {
580 0 : mm->scan_color = color;
581 0 : mm->scan_alignment = alignment;
582 0 : mm->scan_size = size;
583 0 : mm->scanned_blocks = 0;
584 0 : mm->scan_hit_start = 0;
585 0 : mm->scan_hit_end = 0;
586 0 : mm->scan_check_range = 0;
587 0 : mm->prev_scanned_node = NULL;
588 0 : }
589 : EXPORT_SYMBOL(drm_mm_init_scan);
590 :
591 : /**
592 : * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
593 : * @mm: drm_mm to scan
594 : * @size: size of the allocation
595 : * @alignment: alignment of the allocation
596 : * @color: opaque tag value to use for the allocation
597 : * @start: start of the allowed range for the allocation
598 : * @end: end of the allowed range for the allocation
599 : *
600 : * This simply sets up the scanning routines with the parameters for the desired
601 : * hole. Note that there's no need to specify allocation flags, since they only
602 : * change the place a node is allocated from within a suitable hole.
603 : *
604 : * Warning:
605 : * As long as the scan list is non-empty, no other operations than
606 : * adding/removing nodes to/from the scan list are allowed.
607 : */
608 0 : void drm_mm_init_scan_with_range(struct drm_mm *mm,
609 : u64 size,
610 : unsigned alignment,
611 : unsigned long color,
612 : u64 start,
613 : u64 end)
614 : {
615 0 : mm->scan_color = color;
616 0 : mm->scan_alignment = alignment;
617 0 : mm->scan_size = size;
618 0 : mm->scanned_blocks = 0;
619 0 : mm->scan_hit_start = 0;
620 0 : mm->scan_hit_end = 0;
621 0 : mm->scan_start = start;
622 0 : mm->scan_end = end;
623 0 : mm->scan_check_range = 1;
624 0 : mm->prev_scanned_node = NULL;
625 0 : }
626 : EXPORT_SYMBOL(drm_mm_init_scan_with_range);
627 :
628 : /**
629 : * drm_mm_scan_add_block - add a node to the scan list
630 : * @node: drm_mm_node to add
631 : *
632 : * Add a node to the scan list that might be freed to make space for the desired
633 : * hole.
634 : *
635 : * Returns:
636 : * True if a hole has been found, false otherwise.
637 : */
638 0 : bool drm_mm_scan_add_block(struct drm_mm_node *node)
639 : {
640 0 : struct drm_mm *mm = node->mm;
641 : struct drm_mm_node *prev_node;
642 : u64 hole_start, hole_end;
643 0 : u64 adj_start, adj_end;
644 :
645 0 : mm->scanned_blocks++;
646 :
647 0 : BUG_ON(node->scanned_block);
648 0 : node->scanned_block = 1;
649 :
650 0 : prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
651 : node_list);
652 :
653 0 : node->scanned_preceeds_hole = prev_node->hole_follows;
654 0 : prev_node->hole_follows = 1;
655 0 : list_del(&node->node_list);
656 0 : node->node_list.prev = &prev_node->node_list;
657 0 : node->node_list.next = &mm->prev_scanned_node->node_list;
658 0 : mm->prev_scanned_node = node;
659 :
660 0 : adj_start = hole_start = drm_mm_hole_node_start(prev_node);
661 0 : adj_end = hole_end = drm_mm_hole_node_end(prev_node);
662 :
663 0 : if (mm->scan_check_range) {
664 0 : if (adj_start < mm->scan_start)
665 0 : adj_start = mm->scan_start;
666 0 : if (adj_end > mm->scan_end)
667 0 : adj_end = mm->scan_end;
668 : }
669 :
670 0 : if (mm->color_adjust)
671 0 : mm->color_adjust(prev_node, mm->scan_color,
672 : &adj_start, &adj_end);
673 :
674 0 : if (check_free_hole(adj_start, adj_end,
675 0 : mm->scan_size, mm->scan_alignment)) {
676 0 : mm->scan_hit_start = hole_start;
677 0 : mm->scan_hit_end = hole_end;
678 0 : return true;
679 : }
680 :
681 0 : return false;
682 0 : }
683 : EXPORT_SYMBOL(drm_mm_scan_add_block);
684 :
685 : /**
686 : * drm_mm_scan_remove_block - remove a node from the scan list
687 : * @node: drm_mm_node to remove
688 : *
689 : * Nodes _must_ be removed in the exact same order from the scan list as they
690 : * have been added, otherwise the internal state of the memory manager will be
691 : * corrupted.
692 : *
693 : * When the scan list is empty, the selected memory nodes can be freed. An
694 : * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
695 : * return the just freed block (because it's at the top of the hole_stack list).
696 : *
697 : * Returns:
698 : * True if this block should be evicted, false otherwise. Will always
699 : * return false when no hole has been found.
700 : */
701 0 : bool drm_mm_scan_remove_block(struct drm_mm_node *node)
702 : {
703 0 : struct drm_mm *mm = node->mm;
704 : struct drm_mm_node *prev_node;
705 :
706 0 : mm->scanned_blocks--;
707 :
708 0 : BUG_ON(!node->scanned_block);
709 0 : node->scanned_block = 0;
710 :
711 0 : prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
712 : node_list);
713 :
714 0 : prev_node->hole_follows = node->scanned_preceeds_hole;
715 0 : list_add(&node->node_list, &prev_node->node_list);
716 :
717 0 : return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
718 0 : node->start < mm->scan_hit_end);
719 : }
720 : EXPORT_SYMBOL(drm_mm_scan_remove_block);
721 :
722 : /**
723 : * drm_mm_clean - checks whether an allocator is clean
724 : * @mm: drm_mm allocator to check
725 : *
726 : * Returns:
727 : * True if the allocator is completely free, false if there's still a node
728 : * allocated in it.
729 : */
730 0 : bool drm_mm_clean(struct drm_mm * mm)
731 : {
732 0 : struct list_head *head = &mm->head_node.node_list;
733 :
734 0 : return (head->next->next == head);
735 : }
736 : EXPORT_SYMBOL(drm_mm_clean);
737 :
738 : /**
739 : * drm_mm_init - initialize a drm-mm allocator
740 : * @mm: the drm_mm structure to initialize
741 : * @start: start of the range managed by @mm
742 : * @size: size of the range managed by @mm
743 : *
744 : * Note that @mm must be cleared to 0 before calling this function.
745 : */
746 0 : void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
747 : {
748 0 : INIT_LIST_HEAD(&mm->hole_stack);
749 0 : mm->scanned_blocks = 0;
750 :
751 : /* Clever trick to avoid a special case in the free hole tracking. */
752 0 : INIT_LIST_HEAD(&mm->head_node.node_list);
753 0 : INIT_LIST_HEAD(&mm->head_node.hole_stack);
754 0 : mm->head_node.hole_follows = 1;
755 0 : mm->head_node.scanned_block = 0;
756 0 : mm->head_node.scanned_prev_free = 0;
757 0 : mm->head_node.scanned_next_free = 0;
758 0 : mm->head_node.mm = mm;
759 0 : mm->head_node.start = start + size;
760 0 : mm->head_node.size = start - mm->head_node.start;
761 0 : list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
762 :
763 0 : mm->color_adjust = NULL;
764 0 : }
765 : EXPORT_SYMBOL(drm_mm_init);
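/*
 * Worked example for the head_node trick above (illustration only): for
 * drm_mm_init(mm, 0x1000, 0x10000) the sentinel gets start = 0x11000 and
 * size = 0x1000 - 0x11000, which wraps to -0x10000 in u64 arithmetic. Its
 * implicit hole therefore starts at start + size = 0x1000 and, with the node
 * list still empty and circular, ends at head_node.start = 0x11000, i.e.
 * exactly the managed range, with no special case needed for the initial hole.
 */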
766 :
767 : /**
768 : * drm_mm_takedown - clean up a drm_mm allocator
769 : * @mm: drm_mm allocator to clean up
770 : *
771 : * Note that it is a bug to call this function on an allocator which is not
772 : * clean.
773 : */
774 0 : void drm_mm_takedown(struct drm_mm * mm)
775 : {
776 0 : WARN(!list_empty(&mm->head_node.node_list),
777 : "Memory manager not clean during takedown.\n");
778 0 : }
779 : EXPORT_SYMBOL(drm_mm_takedown);
780 :
781 0 : static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
782 : const char *prefix)
783 : {
784 : u64 hole_start, hole_end, hole_size;
785 :
786 0 : if (entry->hole_follows) {
787 0 : hole_start = drm_mm_hole_node_start(entry);
788 0 : hole_end = drm_mm_hole_node_end(entry);
789 0 : hole_size = hole_end - hole_start;
790 : pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
791 : hole_end, hole_size);
792 0 : return hole_size;
793 : }
794 :
795 0 : return 0;
796 0 : }
797 :
798 : /**
799 : * drm_mm_debug_table - dump allocator state to dmesg
800 : * @mm: drm_mm allocator to dump
801 : * @prefix: prefix to use for dumping to dmesg
802 : */
803 0 : void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
804 : {
805 : struct drm_mm_node *entry;
806 : u64 total_used = 0, total_free = 0, total = 0;
807 :
808 0 : total_free += drm_mm_debug_hole(&mm->head_node, prefix);
809 :
810 0 : drm_mm_for_each_node(entry, mm) {
811 : pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
812 : entry->start + entry->size, entry->size);
813 0 : total_used += entry->size;
814 0 : total_free += drm_mm_debug_hole(entry, prefix);
815 : }
816 : total = total_free + total_used;
817 :
818 : pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
819 : total_used, total_free);
820 0 : }
821 : EXPORT_SYMBOL(drm_mm_debug_table);
822 :
823 : #if defined(CONFIG_DEBUG_FS)
824 : static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
825 : {
826 : u64 hole_start, hole_end, hole_size;
827 :
828 : if (entry->hole_follows) {
829 : hole_start = drm_mm_hole_node_start(entry);
830 : hole_end = drm_mm_hole_node_end(entry);
831 : hole_size = hole_end - hole_start;
832 : seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
833 : hole_end, hole_size);
834 : return hole_size;
835 : }
836 :
837 : return 0;
838 : }
839 :
840 : /**
841 : * drm_mm_dump_table - dump allocator state to a seq_file
842 : * @m: seq_file to dump to
843 : * @mm: drm_mm allocator to dump
844 : */
845 : int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
846 : {
847 : struct drm_mm_node *entry;
848 : u64 total_used = 0, total_free = 0, total = 0;
849 :
850 : total_free += drm_mm_dump_hole(m, &mm->head_node);
851 :
852 : drm_mm_for_each_node(entry, mm) {
853 : seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
854 : entry->start + entry->size, entry->size);
855 : total_used += entry->size;
856 : total_free += drm_mm_dump_hole(m, entry);
857 : }
858 : total = total_free + total_used;
859 :
860 : seq_printf(m, "total: %llu, used %llu free %llu\n", total,
861 : total_used, total_free);
862 : return 0;
863 : }
864 : EXPORT_SYMBOL(drm_mm_dump_table);
865 : #endif