Line data Source code
1 : /**************************************************************************
2 : *
3 : * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 : * All Rights Reserved.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the
8 : * "Software"), to deal in the Software without restriction, including
9 : * without limitation the rights to use, copy, modify, merge, publish,
10 : * distribute, sub license, and/or sell copies of the Software, and to
11 : * permit persons to whom the Software is furnished to do so, subject to
12 : * the following conditions:
13 : *
14 : * The above copyright notice and this permission notice (including the
15 : * next paragraph) shall be included in all copies or substantial portions
16 : * of the Software.
17 : *
18 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 : * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 : * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 : * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 : * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 : * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 : *
26 : **************************************************************************/
27 : /*
28 : * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 : */
30 : #ifndef _TTM_BO_DRIVER_H_
31 : #define _TTM_BO_DRIVER_H_
32 :
33 : #include <sys/task.h>
34 : #include <dev/pci/drm/ttm/ttm_bo_api.h>
35 : #include <dev/pci/drm/ttm/ttm_memory.h>
36 : #include <dev/pci/drm/ttm/ttm_module.h>
37 : #include <dev/pci/drm/ttm/ttm_placement.h>
38 : #include <dev/pci/drm/drm_mm.h>
39 : #include <dev/pci/drm/drm_global.h>
40 : #include <dev/pci/drm/drm_vma_manager.h>
41 :
42 : struct ttm_backend_func {
43 : /**
44 : * struct ttm_backend_func member bind
45 : *
46 : * @ttm: Pointer to a struct ttm_tt.
47 : * @bo_mem: Pointer to a struct ttm_mem_reg describing the
48 : * memory type and location for binding.
49 : *
50 : * Bind the backend pages into the aperture in the location
51 : * indicated by @bo_mem. This function should be able to handle
52 : * differences between aperture and system page sizes.
53 : */
54 : int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
55 :
56 : /**
57 : * struct ttm_backend_func member unbind
58 : *
59 : * @ttm: Pointer to a struct ttm_tt.
60 : *
61 : * Unbind previously bound backend pages. This function should be
62 : * able to handle differences between aperture and system page sizes.
63 : */
64 : int (*unbind) (struct ttm_tt *ttm);
65 :
66 : /**
67 : * struct ttm_backend_func member destroy
68 : *
69 : * @ttm: Pointer to a struct ttm_tt.
70 : *
71 : * Destroy the backend. This will be called back from ttm_tt_destroy,
72 : * so don't call ttm_tt_destroy from the callback, or an infinite loop results.
73 : */
74 : void (*destroy) (struct ttm_tt *ttm);
75 : };
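/*
 * Illustrative sketch, not part of this header: a driver supplies its own
 * ttm_backend_func table and points ttm_tt::func at it. All mydrv_* names
 * below are hypothetical placeholders.
 */
static int
mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/*
	 * Program the GART/aperture entries for ttm->pages starting at
	 * bo_mem->start, converting between system and aperture page
	 * sizes if they differ.
	 */
	return 0;
}

static int
mydrv_ttm_unbind(struct ttm_tt *ttm)
{
	/* Tear down the aperture entries installed by mydrv_ttm_bind(). */
	return 0;
}

static void
mydrv_ttm_destroy(struct ttm_tt *ttm)
{
	/*
	 * Free the driver's ttm_tt container here. Do not call
	 * ttm_tt_destroy(): we are invoked from it.
	 */
}

static struct ttm_backend_func mydrv_backend_func = {
	.bind		= mydrv_ttm_bind,
	.unbind		= mydrv_ttm_unbind,
	.destroy	= mydrv_ttm_destroy,
};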
76 :
77 : #define TTM_PAGE_FLAG_WRITE (1 << 3)
78 : #define TTM_PAGE_FLAG_SWAPPED (1 << 4)
79 : #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
80 : #define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
81 : #define TTM_PAGE_FLAG_DMA32 (1 << 7)
82 : #define TTM_PAGE_FLAG_SG (1 << 8)
83 :
84 : enum ttm_caching_state {
85 : tt_uncached,
86 : tt_wc,
87 : tt_cached
88 : };
89 :
90 : /**
91 : * struct ttm_tt
92 : *
93 : * @bdev: Pointer to a struct ttm_bo_device.
94 : * @func: Pointer to a struct ttm_backend_func that describes
95 : * the backend methods.
96 : * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
97 : * pointer.
98 : * @pages: Array of pages backing the data.
99 : * @num_pages: Number of pages in the page array.
100 : * @page_flags: TTM_PAGE_FLAG_XX flags describing the pages.
101 : * @glob: Pointer to a struct ttm_bo_global.
102 : * @swap_storage: Pointer to the uvm object used for swap storage.
103 : * @caching_state: The current caching state of the pages.
104 : * @state: The current binding state of the pages.
105 : *
106 : * This is a structure holding the pages, caching- and aperture binding
107 : * status for a buffer object that isn't backed by fixed (VRAM / AGP)
108 : * memory.
109 : */
110 :
111 : struct ttm_tt {
112 : struct ttm_bo_device *bdev;
113 : struct ttm_backend_func *func;
114 : struct vm_page *dummy_read_page;
115 : struct vm_page **pages;
116 : uint32_t page_flags;
117 : unsigned long num_pages;
118 : struct sg_table *sg; /* for SG objects via dma-buf */
119 : struct ttm_bo_global *glob;
120 : struct uvm_object *swap_storage;
121 : enum ttm_caching_state caching_state;
122 : enum {
123 : tt_bound,
124 : tt_unbound,
125 : tt_unpopulated,
126 : } state;
127 : };
128 :
129 : /**
130 : * struct ttm_dma_tt
131 : *
132 : * @ttm: Base ttm_tt struct.
133 : * @cpu_address: The CPU address of the pages
134 : * @dma_address: The DMA (bus) addresses of the pages
135 : * @pages_list: used by some page allocation backend
136 : *
137 : * This structure extends struct ttm_tt with the CPU and DMA (bus)
138 : * addresses of the backing pages, for buffer objects that aren't
139 : * backed by fixed (VRAM / AGP) memory.
140 : */
141 : struct ttm_dma_tt {
142 : struct ttm_tt ttm;
143 : void **cpu_address;
144 : dma_addr_t *dma_address;
145 : struct list_head pages_list;
146 : };
147 :
148 : #define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
149 : #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
150 : #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
151 :
152 : struct ttm_mem_type_manager;
153 :
154 : struct ttm_mem_type_manager_func {
155 : /**
156 : * struct ttm_mem_type_manager member init
157 : *
158 : * @man: Pointer to a memory type manager.
159 : * @p_size: Implementation dependent, but typically the size of the
160 : * range to be managed in pages.
161 : *
162 : * Called to initialize a private range manager. The function is
163 : * expected to initialize the man::priv member.
164 : * Returns 0 on success, negative error code on failure.
165 : */
166 : int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
167 :
168 : /**
169 : * struct ttm_mem_type_manager member takedown
170 : *
171 : * @man: Pointer to a memory type manager.
172 : *
173 : * Called to undo the setup done in init. All allocated resources
174 : * should be freed.
175 : */
176 : int (*takedown)(struct ttm_mem_type_manager *man);
177 :
178 : /**
179 : * struct ttm_mem_type_manager member get_node
180 : *
181 : * @man: Pointer to a memory type manager.
182 : * @bo: Pointer to the buffer object we're allocating space for.
183 : * @place: Placement details, including any additional
184 : * placement flags.
185 : * @mem: Pointer to a struct ttm_mem_reg to be filled in.
186 : *
187 : * This function should allocate space in the memory type managed
188 : * by @man. Placement details, if applicable, are given by @place.
189 : * If successful,
190 : * @mem::mm_node should be set to a non-null value,
191 : * @mem::start should be set to a value identifying the beginning
192 : * of the range allocated, and the function should return zero.
193 : * If the memory region cannot accommodate the buffer object,
194 : * @mem::mm_node should be set to NULL and the function should return 0.
195 : * If a system error occurred that prevents the request from being
196 : * fulfilled, the function should return a negative error code.
197 : *
198 : * Note that @mem::mm_node will only be dereferenced by
199 : * struct ttm_mem_type_manager functions and optionally by the driver,
200 : * which has knowledge of the underlying type.
201 : *
202 : * This function may not be called from within atomic context, so
203 : * an implementation can and must use either a mutex or a spinlock to
204 : * protect any data structures managing the space.
205 : */
206 : int (*get_node)(struct ttm_mem_type_manager *man,
207 : struct ttm_buffer_object *bo,
208 : const struct ttm_place *place,
209 : struct ttm_mem_reg *mem);
210 :
211 : /**
212 : * struct ttm_mem_type_manager member put_node
213 : *
214 : * @man: Pointer to a memory type manager.
215 : * @mem: Pointer to a struct ttm_mem_reg describing the resources to free.
216 : *
217 : * This function frees memory type resources previously allocated
218 : * and identified by @mem::mm_node and @mem::start. May not
219 : * be called from within atomic context.
220 : */
221 : void (*put_node)(struct ttm_mem_type_manager *man,
222 : struct ttm_mem_reg *mem);
223 :
224 : /**
225 : * struct ttm_mem_type_manager member debug
226 : *
227 : * @man: Pointer to a memory type manager.
228 : * @prefix: Prefix to be used in printout to identify the caller.
229 : *
230 : * This function is called to print out the state of the memory
231 : * type manager to aid debugging of out-of-memory conditions.
232 : * It may not be called from within atomic context.
233 : */
234 : void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
235 : };
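/*
 * Sketch of driver-side code, with hypothetical mydrv_* names: most drivers
 * do not implement this interface themselves but plug in the stock
 * drm_mm-backed range manager, ttm_bo_manager_func (declared near the end
 * of this header), from their init_mem_type() hook.
 */
static int
mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory needs no range manager. */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* On-card memory handled by the generic range manager. */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}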
236 :
237 : /**
238 : * struct ttm_mem_type_manager
239 : *
240 : * @has_type: The memory type has been initialized.
241 : * @use_type: The memory type is enabled.
242 : * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
243 : * managed by this memory type.
244 : * @gpu_offset: If used, the GPU offset of the first managed page of
245 : * fixed memory or the first managed location in an aperture.
246 : * @size: Size of the managed region.
247 : * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
248 : * as defined in ttm_placement_common.h
249 : * @default_caching: The default caching policy used for a buffer object
250 : * placed in this memory type if the user doesn't provide one.
251 : * @func: structure pointer implementing the range manager. See above
252 : * @priv: Driver private closure for @func.
253 : * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
254 : * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
255 : * reserved by the TTM vm system.
256 : * @io_reserve_lru: Optional lru list for unreserving io mem regions.
257 : * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
258 : * static information. bdev::driver::io_mem_free is never used.
259 : * @lru: The lru list for this memory type.
260 : *
261 : * This structure is used to identify and manage memory types for a device.
262 : * It's set up by the ttm_bo_driver::init_mem_type method.
263 : */
264 :
265 :
266 :
267 : struct ttm_mem_type_manager {
268 : struct ttm_bo_device *bdev;
269 :
270 : /*
271 : * No protection. Constant from start.
272 : */
273 :
274 : bool has_type;
275 : bool use_type;
276 : uint32_t flags;
277 : uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
278 : uint64_t size;
279 : uint32_t available_caching;
280 : uint32_t default_caching;
281 : const struct ttm_mem_type_manager_func *func;
282 : void *priv;
283 : struct rwlock io_reserve_mutex;
284 : bool use_io_reserve_lru;
285 : bool io_reserve_fastpath;
286 :
287 : /*
288 : * Protected by @io_reserve_mutex:
289 : */
290 :
291 : struct list_head io_reserve_lru;
292 :
293 : /*
294 : * Protected by the global->lru_lock.
295 : */
296 :
297 : struct list_head lru;
298 : };
299 :
300 : /**
301 : * struct ttm_bo_driver
302 : *
303 : * @ttm_tt_create: Callback to create a struct ttm_tt backed by system pages.
304 : * @invalidate_caches: Callback to invalidate read caches when a buffer object
305 : * has been evicted.
306 : * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
307 : * structure.
308 : * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
309 : * @move: Callback for a driver to hook in accelerated functions to
310 : * move a buffer.
311 : * If set to NULL, a potentially slow memcpy() move is used.
312 : */
313 :
314 : struct ttm_bo_driver {
315 : /**
316 : * ttm_tt_create
317 : *
318 : * @bdev: pointer to a struct ttm_bo_device.
319 : * @size: Size of the data that needs backing.
320 : * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
321 : * @dummy_read_page: See struct ttm_bo_device.
322 : *
323 : * Create a struct ttm_tt to back data with system memory pages.
324 : * No pages are actually allocated.
325 : * Returns:
326 : * NULL: Out of memory.
327 : */
328 : struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
329 : unsigned long size,
330 : uint32_t page_flags,
331 : struct vm_page *dummy_read_page);
332 :
333 : /**
334 : * ttm_tt_populate
335 : *
336 : * @ttm: The struct ttm_tt to contain the backing pages.
337 : *
338 : * Allocate all backing pages
339 : * Returns:
340 : * -ENOMEM: Out of memory.
341 : */
342 : int (*ttm_tt_populate)(struct ttm_tt *ttm);
343 :
344 : /**
345 : * ttm_tt_unpopulate
346 : *
347 : * @ttm: The struct ttm_tt to contain the backing pages.
348 : *
349 : * Free all backing pages.
350 : */
351 : void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
352 :
353 : /**
354 : * struct ttm_bo_driver member invalidate_caches
355 : *
356 : * @bdev: the buffer object device.
357 : * @flags: new placement of the rebound buffer object.
358 : *
359 : * A previously evicted buffer has been rebound in a
360 : * potentially new location. Tell the driver that it might
361 : * consider invalidating read (texture) caches on the next command
362 : * submission as a consequence.
363 : */
364 :
365 : int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
366 : int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
367 : struct ttm_mem_type_manager *man);
368 : /**
369 : * struct ttm_bo_driver member evict_flags:
370 : *
371 : * @bo: the buffer object to be evicted
372 : *
373 : * Return the bo flags for a buffer which is not mapped to the hardware.
374 : * These will be placed in proposed_flags so that when the move is
375 : * finished, they'll end up in bo->mem.flags
376 : */
377 :
378 : void(*evict_flags) (struct ttm_buffer_object *bo,
379 : struct ttm_placement *placement);
380 : /**
381 : * struct ttm_bo_driver member move:
382 : *
383 : * @bo: the buffer to move
384 : * @evict: whether this motion is evicting the buffer from
385 : * the graphics address space
386 : * @interruptible: Use interruptible sleeps if possible when sleeping.
387 : * @no_wait_gpu: whether this should give up and return -EBUSY
388 : * if this move would require sleeping
389 : * @new_mem: the new memory region receiving the buffer
390 : *
391 : * Move a buffer between two memory regions.
392 : */
393 : int (*move) (struct ttm_buffer_object *bo,
394 : bool evict, bool interruptible,
395 : bool no_wait_gpu,
396 : struct ttm_mem_reg *new_mem);
397 :
398 : /**
399 : * struct ttm_bo_driver_member verify_access
400 : *
401 : * @bo: Pointer to a buffer object.
402 : * @filp: Pointer to a struct file trying to access the object.
403 : *
404 : * Called from the map / write / read methods to verify that the
405 : * caller is permitted to access the buffer object.
406 : * This member may be set to NULL, which will refuse this kind of
407 : * access for all buffer objects.
408 : * This function should return 0 if access is granted, -EPERM otherwise.
409 : */
410 : int (*verify_access) (struct ttm_buffer_object *bo,
411 : struct file *filp);
412 :
413 : /* hook to notify driver about a driver move so it
414 : * can do tiling things */
415 : void (*move_notify)(struct ttm_buffer_object *bo,
416 : struct ttm_mem_reg *new_mem);
417 : /* notify the driver we are taking a fault on this BO
418 : * and have reserved it */
419 : int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
420 :
421 : /**
422 : * notify the driver that we're about to swap out this bo
423 : */
424 : void (*swap_notify) (struct ttm_buffer_object *bo);
425 :
426 : /**
427 : * Driver callback for when mapping io memory (for bo_move_memcpy,
428 : * for instance). TTM will take care to call io_mem_free whenever
429 : * the mapping is no longer in use. io_mem_reserve & io_mem_free
430 : * are balanced.
431 : */
432 : int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
433 : void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
434 : };
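/*
 * Sketch, hypothetical names: a driver fills in a file-scope vtable and
 * hands it to ttm_bo_device_init(); hooks it doesn't need stay NULL
 * (mydrv_ttm_tt_create is sketched further down, next to ttm_tt_init()).
 */
static struct ttm_bo_driver mydrv_bo_driver = {
	.ttm_tt_create = mydrv_ttm_tt_create,
	.ttm_tt_populate = mydrv_ttm_tt_populate,
	.ttm_tt_unpopulate = mydrv_ttm_tt_unpopulate,
	.init_mem_type = mydrv_init_mem_type,
	.evict_flags = mydrv_evict_flags,
	.move = NULL,			/* fall back to the memcpy move */
	.verify_access = mydrv_verify_access,
	.io_mem_reserve = mydrv_io_mem_reserve,
	.io_mem_free = mydrv_io_mem_free,
};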
435 :
436 : /**
437 : * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
438 : */
439 :
440 : struct ttm_bo_global_ref {
441 : struct drm_global_reference ref;
442 : struct ttm_mem_global *mem_glob;
443 : };
444 :
445 : /**
446 : * struct ttm_bo_global - Buffer object driver global data.
447 : *
448 : * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
449 : * @dummy_read_page: Pointer to a dummy page used for mapping requests
450 : * of unpopulated pages.
451 : * @shrink: A shrink callback object used for buffer object swap.
452 : * @device_list_mutex: Mutex protecting the device list.
453 : * This mutex is held while traversing the device list for pm options.
454 : * @lru_lock: Spinlock protecting the bo subsystem lru lists.
455 : * @device_list: List of buffer object devices.
456 : * @swap_lru: Lru list of buffer objects used for swapping.
457 : */
458 :
459 : struct ttm_bo_global {
460 :
461 : /**
462 : * Constant after init.
463 : */
464 :
465 : struct kobject kobj;
466 : struct ttm_mem_global *mem_glob;
467 : struct vm_page *dummy_read_page;
468 : struct ttm_mem_shrink shrink;
469 : struct rwlock device_list_mutex;
470 : spinlock_t lru_lock;
471 :
472 : /**
473 : * Protected by device_list_mutex.
474 : */
475 : struct list_head device_list;
476 :
477 : /**
478 : * Protected by the lru_lock.
479 : */
480 : struct list_head swap_lru;
481 :
482 : /**
483 : * Internal protection.
484 : */
485 : atomic_t bo_count;
486 : };
487 :
488 :
489 : #define TTM_NUM_MEM_TYPES 8
490 :
491 : #define TTM_BO_PRIV_FLAG_MOVING 0 /* Buffer object is moving and needs
492 : idling before CPU mapping */
493 : #define TTM_BO_PRIV_FLAG_MAX 1
494 : /**
495 : * struct ttm_bo_device - Buffer object driver device-specific data.
496 : *
497 : * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
498 : * @man: An array of mem_type_managers.
499 : * @vma_manager: Address space manager
500 : * @lru_lock: Spinlock that protects the buffer+device lru lists and
501 : * ddestroy lists.
502 : * @val_seq: Current validation sequence.
503 : * @dev_mapping: A pointer to the struct address_space representing the
504 : * device address space.
505 : * @wq: Work queue structure for the delayed delete workqueue.
506 : *
507 : */
508 :
509 : struct ttm_bo_device {
510 :
511 : /*
512 : * Constant after bo device init / atomic.
513 : */
514 : struct list_head device_list;
515 : struct ttm_bo_global *glob;
516 : struct ttm_bo_driver *driver;
517 : struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
518 :
519 : bus_space_tag_t iot;
520 : bus_space_tag_t memt;
521 : bus_dma_tag_t dmat;
522 :
523 : /*
524 : * Protected by internal locks.
525 : */
526 : struct drm_vma_offset_manager vma_manager;
527 :
528 : /*
529 : * Protected by the global lru_lock.
530 : */
531 : struct list_head ddestroy;
532 : uint32_t val_seq;
533 :
534 : /*
535 : * Protected by load / firstopen / lastclose / unload sync.
536 : */
537 :
538 : struct address_space *dev_mapping;
539 :
540 : /*
541 : * Internal protection.
542 : */
543 :
544 : struct delayed_work wq;
545 :
546 : bool need_dma32;
547 : };
548 :
549 : /**
550 : * ttm_flag_masked
551 : *
552 : * @old: Pointer to the result and original value.
553 : * @new: New value of bits.
554 : * @mask: Mask of bits to change.
555 : *
556 : * Convenience function to change a number of bits identified by a mask.
557 : */
558 :
559 : static inline uint32_t
560 0 : ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
561 : {
562 0 : *old ^= (*old ^ new) & mask;
563 0 : return *old;
564 : }
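/*
 * Worked example: with *old == 0xff00, new == 0x00f0 and mask == 0x00ff,
 * (*old ^ new) & mask == 0x00f0, so *old becomes 0xff00 ^ 0x00f0 == 0xfff0.
 * Bits selected by @mask now come from @new; all other bits are untouched.
 */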
565 :
566 : /**
567 : * ttm_tt_init
568 : *
569 : * @ttm: The struct ttm_tt.
570 : * @bdev: pointer to a struct ttm_bo_device.
571 : * @size: Size of the data that needs backing.
572 : * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
573 : * @dummy_read_page: See struct ttm_bo_device.
574 : *
575 : * Create a struct ttm_tt to back data with system memory pages.
576 : * No pages are actually allocated.
577 : * Returns:
578 : * -ENOMEM: Out of memory.
579 : */
580 : extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
581 : unsigned long size, uint32_t page_flags,
582 : struct vm_page *dummy_read_page);
583 : extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
584 : unsigned long size, uint32_t page_flags,
585 : struct vm_page *dummy_read_page);
586 :
587 : /**
588 : * ttm_tt_fini
589 : *
590 : * @ttm: the ttm_tt structure.
591 : *
592 : * Free memory of ttm_tt structure
593 : */
594 : extern void ttm_tt_fini(struct ttm_tt *ttm);
595 : extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
596 :
597 : /**
598 : * ttm_tt_bind:
599 : *
600 : * @ttm: The struct ttm_tt containing backing pages.
601 : * @bo_mem: The struct ttm_mem_reg identifying the binding location.
602 : *
603 : * Bind the pages of @ttm to an aperture location identified by @bo_mem
604 : */
605 : extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
606 :
607 : /**
608 : * ttm_tt_destroy:
609 : *
610 : * @ttm: The struct ttm_tt.
611 : *
612 : * Unbind, unpopulate and destroy common struct ttm_tt.
613 : */
614 : extern void ttm_tt_destroy(struct ttm_tt *ttm);
615 :
616 : /**
617 : * ttm_tt_unbind:
618 : *
619 : * @ttm: The struct ttm_tt.
620 : *
621 : * Unbind a struct ttm_tt.
622 : */
623 : extern void ttm_tt_unbind(struct ttm_tt *ttm);
624 :
625 : /**
626 : * ttm_tt_swapin:
627 : *
628 : * @ttm: The struct ttm_tt.
629 : *
630 : * Swap in a previously swapped-out ttm_tt.
631 : */
632 : extern int ttm_tt_swapin(struct ttm_tt *ttm);
633 :
634 : /**
635 : * ttm_tt_set_placement_caching:
636 : *
637 : * @ttm: A struct ttm_tt whose backing pages will change caching policy.
638 : * @placement: Flag indicating the desired caching policy.
639 : *
640 : * This function will change the caching policy of any default kernel
641 : * mappings of the pages backing @ttm. If changing from cached to
642 : * uncached or write-combined, all CPU caches will first be flushed to
643 : * make sure the data of the pages hits RAM. This function may be very
644 : * costly as it involves global TLB and cache flushes and potential
645 : * page splitting / combining.
646 : */
647 : extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
648 : extern int ttm_tt_swapout(struct ttm_tt *ttm,
649 : struct uvm_object *persistent_swap_storage);
650 :
651 : /**
652 : * ttm_tt_unpopulate - free pages from a ttm
653 : *
654 : * @ttm: Pointer to the ttm_tt structure
655 : *
656 : * Calls the driver method to free all pages from a ttm
657 : */
658 : extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
659 :
660 : /*
661 : * ttm_bo.c
662 : */
663 :
664 : /**
665 : * ttm_mem_reg_is_pci
666 : *
667 : * @bdev: Pointer to a struct ttm_bo_device.
668 : * @mem: A valid struct ttm_mem_reg.
669 : *
670 : * Returns true if the memory described by @mem is PCI memory,
671 : * false otherwise.
672 : */
673 : extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
674 : struct ttm_mem_reg *mem);
675 :
676 : /**
677 : * ttm_bo_mem_space
678 : *
679 : * @bo: Pointer to a struct ttm_buffer_object, the data of which
680 : * we want to allocate space for.
681 : * @placement: Proposed new placement for the buffer object.
682 : * @mem: A struct ttm_mem_reg.
683 : * @interruptible: Sleep interruptibly when waiting.
684 : * @no_wait_gpu: Return immediately if the GPU is busy.
685 : *
686 : * Allocate memory space for the buffer object pointed to by @bo, using
687 : * the placement flags in @placement, potentially evicting other idle buffer objects.
688 : * This function may sleep while waiting for space to become available.
689 : * Returns:
690 : * -EBUSY: No space available (only if @no_wait_gpu is true).
691 : * -ENOMEM: Could not allocate memory for the buffer object, either due to
692 : * fragmentation or concurrent allocators.
693 : * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
694 : */
695 : extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
696 : struct ttm_placement *placement,
697 : struct ttm_mem_reg *mem,
698 : bool interruptible,
699 : bool no_wait_gpu);
700 :
701 : extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
702 : struct ttm_mem_reg *mem);
703 : extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
704 : struct ttm_mem_reg *mem);
705 :
706 : extern void ttm_bo_global_release(struct drm_global_reference *ref);
707 : extern int ttm_bo_global_init(struct drm_global_reference *ref);
708 :
709 : extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
710 :
711 : /**
712 : * ttm_bo_device_init
713 : *
714 : * @bdev: A pointer to a struct ttm_bo_device to initialize.
715 : * @glob: A pointer to an initialized struct ttm_bo_global.
716 : * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
717 : * @mapping: The address space to use for this bo.
718 : * @file_page_offset: Offset into the device address space that is available
719 : * for buffer data. This ensures compatibility with other users of the
720 : * address space.
721 : *
722 : * Initializes a struct ttm_bo_device:
723 : * Returns:
724 : * !0: Failure.
725 : */
726 : extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
727 : struct ttm_bo_global *glob,
728 : struct ttm_bo_driver *driver,
729 : struct address_space *mapping,
730 : uint64_t file_page_offset, bool need_dma32);
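/*
 * Sketch of typical bring-up order during driver attach. struct
 * mydrv_softc, its fields and DRM_FILE_PAGE_OFFSET are assumptions about
 * the calling driver; ttm_bo_init_mm() comes from ttm_bo_api.h.
 */
static int
mydrv_ttm_init(struct mydrv_softc *sc)
{
	int ret;

	ret = ttm_bo_device_init(&sc->bdev, sc->bo_global_ref.ref.object,
	    &mydrv_bo_driver, sc->dev_mapping, DRM_FILE_PAGE_OFFSET, false);
	if (ret)
		return ret;
	/* Then size each managed memory type in pages, e.g. VRAM: */
	return ttm_bo_init_mm(&sc->bdev, TTM_PL_VRAM,
	    sc->vram_size >> PAGE_SHIFT);
}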
731 :
732 : /**
733 : * ttm_bo_unmap_virtual
734 : *
735 : * @bo: tear down the virtual mappings for this BO
736 : */
737 : extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
738 :
739 : /**
740 : * ttm_bo_unmap_virtual_locked
741 : *
742 : * @bo: tear down the virtual mappings for this BO
743 : *
744 : * The caller must take ttm_mem_io_lock before calling this function.
745 : */
746 : extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
747 :
748 : extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
749 : extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
750 : extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
751 : bool interruptible);
752 : extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
753 :
754 : extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
755 : extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
756 :
757 : /**
758 : * __ttm_bo_reserve:
759 : *
760 : * @bo: A pointer to a struct ttm_buffer_object.
761 : * @interruptible: Sleep interruptible if waiting.
762 : * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
763 : * @use_ticket: If @bo is already reserved, only sleep waiting for
764 : * it to become unreserved if @ticket->stamp is older.
765 : *
766 : * Will not remove reserved buffers from the lru lists.
767 : * Otherwise identical to ttm_bo_reserve.
768 : *
769 : * Returns:
770 : * -EDEADLK: The reservation may cause a deadlock.
771 : * Release all buffer reservations, wait for @bo to become unreserved and
772 : * try again (only if @use_ticket is true).
773 : * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
774 : * a signal. Release all buffer reservations and return to user-space.
775 : * -EBUSY: The function needed to sleep, but @no_wait was true
776 : * -EALREADY: Bo already reserved using @ticket. This error code will only
777 : * be returned if @use_ticket is set to true.
778 : */
779 0 : static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
780 : bool interruptible,
781 : bool no_wait, bool use_ticket,
782 : struct ww_acquire_ctx *ticket)
783 : {
784 : int ret = 0;
785 :
786 0 : if (no_wait) {
787 : bool success;
788 0 : if (WARN_ON(ticket))
789 0 : return -EBUSY;
790 :
791 0 : success = ww_mutex_trylock(&bo->resv->lock);
792 0 : return success ? 0 : -EBUSY;
793 : }
794 :
795 0 : if (interruptible)
796 0 : ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
797 : else
798 0 : ret = ww_mutex_lock(&bo->resv->lock, ticket);
799 0 : if (ret == -EINTR)
800 0 : return -ERESTARTSYS;
801 0 : return ret;
802 0 : }
803 :
804 : /**
805 : * ttm_bo_reserve:
806 : *
807 : * @bo: A pointer to a struct ttm_buffer_object.
808 : * @interruptible: Sleep interruptible if waiting.
809 : * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
810 : * @use_ticket: If @bo is already reserved, only sleep waiting for
811 : * it to become unreserved if @ticket->stamp is older.
812 : *
813 : * Locks a buffer object for validation. (Or prevents other processes from
814 : * locking it for validation) and removes it from lru lists, while taking
815 : * a number of measures to prevent deadlocks.
816 : *
817 : * Deadlocks may occur when two processes try to reserve multiple buffers in
818 : * different order, either by will or as a result of a buffer being evicted
819 : * to make room for a buffer already reserved. (Buffers are reserved before
820 : * they are evicted). The following algorithm prevents such deadlocks from
821 : * occurring:
822 : * Processes attempting to reserve multiple buffers other than for eviction
823 : * (typically execbuf) should first obtain a unique 32-bit
824 : * validation sequence number,
825 : * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
826 : * sequence number. If upon call of this function, the buffer object is already
827 : * reserved, the validation sequence is checked against the validation
828 : * sequence of the process currently reserving the buffer,
829 : * and if the current validation sequence is greater than that of the process
830 : * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps
831 : * waiting for the buffer to become unreserved, after which it retries
832 : * reserving.
833 : * The caller should, when receiving an -EDEADLK error,
834 : * release all its buffer reservations, wait for @bo to become unreserved, and
835 : * then rerun the validation with the same validation sequence. This procedure
836 : * will always guarantee that the process with the lowest validation sequence
837 : * will eventually succeed, preventing both deadlocks and starvation.
838 : *
839 : * Returns:
840 : * -EDEADLK: The reservation may cause a deadlock.
841 : * Release all buffer reservations, wait for @bo to become unreserved and
842 : * try again (only if @use_ticket is true).
843 : * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
844 : * a signal. Release all buffer reservations and return to user-space.
845 : * -EBUSY: The function needed to sleep, but @no_wait was true
846 : * -EALREADY: Bo already reserved using @ticket. This error code will only
847 : * be returned if @use_ticket is set to true.
848 : */
849 0 : static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
850 : bool interruptible,
851 : bool no_wait, bool use_ticket,
852 : struct ww_acquire_ctx *ticket)
853 : {
854 : int ret;
855 :
856 0 : WARN_ON(!atomic_read(&bo->kref.refcount));
857 :
858 0 : ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
859 0 : if (likely(ret == 0))
860 0 : ttm_bo_del_sub_from_lru(bo);
861 :
862 0 : return ret;
863 : }
864 :
865 : /**
866 : * ttm_bo_reserve_slowpath:
867 : * @bo: A pointer to a struct ttm_buffer_object.
868 : * @interruptible: Sleep interruptible if waiting.
869 : * @sequence: Set (@bo)->sequence to this value after lock
870 : *
871 : * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
872 : * from all our other reservations. Because there are no other reservations
873 : * held by us, this function cannot deadlock any more.
874 : */
875 : static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
876 : bool interruptible,
877 : struct ww_acquire_ctx *ticket)
878 : {
879 : int ret = 0;
880 :
881 : WARN_ON(!atomic_read(&bo->kref.refcount));
882 :
883 : if (interruptible)
884 : ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
885 : ticket);
886 : else
887 : ww_mutex_lock_slow(&bo->resv->lock, ticket);
888 :
889 : if (likely(ret == 0))
890 : ttm_bo_del_sub_from_lru(bo);
891 : else if (ret == -EINTR)
892 : ret = -ERESTARTSYS;
893 :
894 : return ret;
895 : }
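/*
 * Sketch of the backoff protocol described above, for two buffers; the
 * helper name is hypothetical and the caller is assumed to have done
 * ww_acquire_init(ticket, ...) beforehand. On -EDEADLK we drop what we
 * hold, sleep on the contended buffer via the slowpath, and retry.
 */
static int
mydrv_reserve_pair(struct ttm_buffer_object *a, struct ttm_buffer_object *b,
    struct ww_acquire_ctx *ticket)
{
	int ret;

	ret = ttm_bo_reserve(a, true, false, true, ticket);
	if (ret)
		return ret;
	ret = ttm_bo_reserve(b, true, false, true, ticket);
	while (ret == -EDEADLK) {
		struct ttm_buffer_object *tmp;

		/* Wounded: back off, then wait on the contended buffer. */
		ttm_bo_unreserve(a);
		ret = ttm_bo_reserve_slowpath(b, true, ticket);
		if (ret)
			return ret;
		/* Retry the other buffer; swap roles in case we lose again. */
		tmp = a;
		a = b;
		b = tmp;
		ret = ttm_bo_reserve(b, true, false, true, ticket);
	}
	if (ret)
		ttm_bo_unreserve(a);
	return ret;
}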
896 :
897 : /**
898 : * __ttm_bo_unreserve
899 : * @bo: A pointer to a struct ttm_buffer_object.
900 : *
901 : * Unreserve a previous reservation of @bo where the buffer object is
902 : * already on lru lists.
903 : */
904 0 : static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
905 : {
906 0 : ww_mutex_unlock(&bo->resv->lock);
907 0 : }
908 :
909 : /**
910 : * ttm_bo_unreserve
911 : *
912 : * @bo: A pointer to a struct ttm_buffer_object.
913 : *
914 : * Unreserve a previous reservation of @bo.
915 : */
916 0 : static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
917 : {
918 0 : if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
919 0 : spin_lock(&bo->glob->lru_lock);
920 0 : ttm_bo_add_to_lru(bo);
921 0 : spin_unlock(&bo->glob->lru_lock);
922 0 : }
923 0 : __ttm_bo_unreserve(bo);
924 0 : }
925 :
926 : /**
927 : * ttm_bo_unreserve_ticket
928 : * @bo: A pointer to a struct ttm_buffer_object.
929 : * @ticket: ww_acquire_ctx used for reserving
930 : *
931 : * Unreserve a previous reservation of @bo made with @ticket.
932 : */
933 : static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
934 : struct ww_acquire_ctx *t)
935 : {
936 : ttm_bo_unreserve(bo);
937 : }
938 :
939 : /*
940 : * ttm_bo_util.c
941 : */
942 :
943 : int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
944 : struct ttm_mem_reg *mem);
945 : void ttm_mem_io_free(struct ttm_bo_device *bdev,
946 : struct ttm_mem_reg *mem);
947 : /**
948 : * ttm_bo_move_ttm
949 : *
950 : * @bo: A pointer to a struct ttm_buffer_object.
951 : * @evict: 1: This is an eviction. Don't try to pipeline.
952 : * @no_wait_gpu: Return immediately if the GPU is busy.
953 : * @new_mem: struct ttm_mem_reg indicating where to move.
954 : *
955 : * Optimized move function for a buffer object with both old and
956 : * new placement backed by a TTM. The function will, if successful,
957 : * free any old aperture space, and set (@new_mem)->mm_node to NULL,
958 : * and update the (@bo)->mem placement flags. If unsuccessful, the old
959 : * data remains untouched, and it's up to the caller to free the
960 : * memory space indicated by @new_mem.
961 : * Returns:
962 : * !0: Failure.
963 : */
964 :
965 : extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
966 : bool evict, bool no_wait_gpu,
967 : struct ttm_mem_reg *new_mem);
968 :
969 : /**
970 : * ttm_bo_move_memcpy
971 : *
972 : * @bo: A pointer to a struct ttm_buffer_object.
973 : * @evict: 1: This is an eviction. Don't try to pipeline.
974 : * @no_wait_gpu: Return immediately if the GPU is busy.
975 : * @new_mem: struct ttm_mem_reg indicating where to move.
976 : *
977 : * Fallback move function for a mappable buffer object in mappable memory.
978 : * The function will, if successful,
979 : * free any old aperture space, and set (@new_mem)->mm_node to NULL,
980 : * and update the (@bo)->mem placement flags. If unsuccessful, the old
981 : * data remains untouched, and it's up to the caller to free the
982 : * memory space indicated by @new_mem.
983 : * Returns:
984 : * !0: Failure.
985 : */
986 :
987 : extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
988 : bool evict, bool no_wait_gpu,
989 : struct ttm_mem_reg *new_mem);
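/*
 * Sketch of a driver move() hook with hypothetical mydrv_* helpers: try an
 * accelerated blit first and fall back to ttm_bo_move_memcpy(), the same
 * fallback TTM uses when ttm_bo_driver::move is NULL.
 */
static int
mydrv_bo_move(struct ttm_buffer_object *bo, bool evict, bool interruptible,
    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	if (mydrv_copy_ring_ready(bo->bdev)) {
		int ret = mydrv_move_blit(bo, evict, no_wait_gpu, new_mem);
		if (ret == 0)
			return 0;
		/* Blit failed; fall through to the CPU copy. */
	}
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}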
990 :
991 : /**
992 : * ttm_bo_free_old_node
993 : *
994 : * @bo: A pointer to a struct ttm_buffer_object.
995 : *
996 : * Utility function to free an old placement after a successful move.
997 : */
998 : extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
999 :
1000 : /**
1001 : * ttm_bo_move_accel_cleanup.
1002 : *
1003 : * @bo: A pointer to a struct ttm_buffer_object.
1004 : * @fence: A fence object that signals when moving is complete.
1005 : * @evict: This is an evict move. Don't return until the buffer is idle.
1006 : * @no_wait_gpu: Return immediately if the GPU is busy.
1007 : * @new_mem: struct ttm_mem_reg indicating where to move.
1008 : *
1009 : * Accelerated move function to be called when an accelerated move
1010 : * has been scheduled. The function will create a new temporary buffer object
1011 : * representing the old placement, and put the sync object on both buffer
1012 : * objects. After that the newly created buffer object is unref'd to be
1013 : * destroyed when the move is complete. This will help pipeline
1014 : * buffer moves.
1015 : */
1016 :
1017 : extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
1018 : struct fence *fence,
1019 : bool evict, bool no_wait_gpu,
1020 : struct ttm_mem_reg *new_mem);
1021 : /**
1022 : * ttm_io_prot
1023 : *
1024 : * @caching_flags: Caching flags, TTM_PL_FLAG_XX.
1025 : * @tmp: Page protection flag for a normal, cached mapping.
1026 : *
1027 : * Utility function that returns the pgprot_t that should be used for
1028 : * setting up a PTE with the caching model indicated by @caching_flags.
1029 : */
1030 : extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
1031 :
1032 : extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
1033 :
1034 : #if __OS_HAS_AGP
1035 : #define TTM_HAS_AGP
1036 :
1037 : /**
1038 : * ttm_agp_tt_create
1039 : *
1040 : * @bdev: Pointer to a struct ttm_bo_device.
1041 : * @agp: The agp bridge this device is sitting on.
1042 : * @size: Size of the data that needs backing.
1043 : * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
1044 : * @dummy_read_page: See struct ttm_bo_device.
1045 : *
1046 : *
1047 : * Create a TTM backend that uses the indicated AGP bridge as an aperture
1048 : * for TT memory. This function uses the linux agpgart interface to
1049 : * bind and unbind memory backing a ttm_tt.
1050 : */
1051 : extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
1052 : struct drm_agp_head *agp,
1053 : unsigned long size, uint32_t page_flags,
1054 : struct vm_page *dummy_read_page);
1055 : int ttm_agp_tt_populate(struct ttm_tt *ttm);
1056 : void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
1057 : #endif
1058 :
1059 : #endif