Line data Source code
1 : /*
2 : * Copyright (c) Red Hat Inc.
3 :
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice (including the
12 : * next paragraph) shall be included in all copies or substantial portions
13 : * of the Software.
14 : *
15 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 : * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 : * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 : * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 : * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 : * DEALINGS IN THE SOFTWARE.
22 : *
23 : * Authors: Dave Airlie <airlied@redhat.com>
24 : * Jerome Glisse <jglisse@redhat.com>
25 : * Pauli Nieminen <suokkos@gmail.com>
26 : */
27 :
28 : /* simple list based uncached page pool
29 : * - Pool collects recently freed pages for reuse
30 : * - Use page->lru to keep a free list
31 : * - doesn't track currently in use pages
32 : */
33 :
34 : #define pr_fmt(fmt) "[TTM] " fmt
35 :
36 : #include <dev/pci/drm/drmP.h>
37 : #include <dev/pci/drm/ttm/ttm_bo_driver.h>
38 : #include <dev/pci/drm/ttm/ttm_page_alloc.h>
39 :
40 : #ifdef TTM_HAS_AGP
41 : #include <dev/pci/agpvar.h>
42 : #endif
43 :
44 : #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct vm_page *))
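/* For example, with a 4 KiB page size and 8-byte page pointers this
 * works out to 4096 / 8 = 512 pages, i.e. one page worth of pointer
 * storage per allocation/free batch. */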
45 : #define SMALL_ALLOCATION 16
46 : #define FREE_ALL_PAGES (~0U)
47 : /* times are in msecs */
48 : #define PAGE_FREE_INTERVAL 1000
49 :
50 : /**
51 : * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
52 : *
53 : * @lock: Protects the shared pool from concurrent access. Must be used with
54 : * irqsave/irqrestore variants because the pool allocator may be called from
55 : * delayed work.
56 : * @fill_lock: Prevent concurrent calls to fill.
57 : * @list: Pool of free uc/wc pages for fast reuse.
58 : * @gfp_flags: Flags to pass for alloc_page.
59 : * @npages: Number of pages in pool.
60 : */
61 : struct ttm_page_pool {
62 : spinlock_t lock;
63 : bool fill_lock;
64 : struct pglist list;
65 : gfp_t gfp_flags;
66 : unsigned npages;
67 : char *name;
68 : unsigned long nfrees;
69 : unsigned long nrefills;
70 : };
71 :
72 : /**
73 : * Limits for the pool. They are handled without locks because the only place
74 : * where they may change is the sysfs store. They won't have an immediate
75 : * effect anyway, so forcing serialized access to them is pointless.
76 : */
77 :
78 : struct ttm_pool_opts {
79 : unsigned alloc_size;
80 : unsigned max_size;
81 : unsigned small;
82 : };
83 :
84 : #define NUM_POOLS 4
85 :
86 : /**
87 : * struct ttm_pool_manager - Holds memory pools for fast allocation
88 : *
89 : * The manager is a read-only object for the pool code, so it doesn't need locking.
90 : *
91 : * @free_interval: minimum number of jiffies between freeing pages from pool.
92 : * @page_alloc_inited: reference counting for pool allocation.
93 : * @work: Work that is used to shrink the pool. Work is only run when there are
94 : * some pages to free.
95 : * @small_allocation: Limit, in number of pages, for what counts as a small allocation.
96 : *
97 : * @pools: All pool objects in use.
98 : **/
99 : struct ttm_pool_manager {
100 : struct kobject kobj;
101 : #ifdef notyet
102 : struct shrinker mm_shrink;
103 : #endif
104 : struct ttm_pool_opts options;
105 :
106 : union {
107 : struct ttm_page_pool pools[NUM_POOLS];
108 : struct {
109 : struct ttm_page_pool wc_pool;
110 : struct ttm_page_pool uc_pool;
111 : struct ttm_page_pool wc_pool_dma32;
112 : struct ttm_page_pool uc_pool_dma32;
113 : } ;
114 : };
115 : };
116 :
117 : #ifdef notyet
118 : static struct attribute ttm_page_pool_max = {
119 : .name = "pool_max_size",
120 : .mode = S_IRUGO | S_IWUSR
121 : };
122 : static struct attribute ttm_page_pool_small = {
123 : .name = "pool_small_allocation",
124 : .mode = S_IRUGO | S_IWUSR
125 : };
126 : static struct attribute ttm_page_pool_alloc_size = {
127 : .name = "pool_allocation_size",
128 : .mode = S_IRUGO | S_IWUSR
129 : };
130 :
131 : static struct attribute *ttm_pool_attrs[] = {
132 : &ttm_page_pool_max,
133 : &ttm_page_pool_small,
134 : &ttm_page_pool_alloc_size,
135 : NULL
136 : };
137 : #endif
138 :
139 0 : static void ttm_pool_kobj_release(struct kobject *kobj)
140 : {
141 : struct ttm_pool_manager *m =
142 0 : container_of(kobj, struct ttm_pool_manager, kobj);
143 0 : kfree(m);
144 0 : }
145 :
146 : #ifdef notyet
147 : static ssize_t ttm_pool_store(struct kobject *kobj,
148 : struct attribute *attr, const char *buffer, size_t size)
149 : {
150 : struct ttm_pool_manager *m =
151 : container_of(kobj, struct ttm_pool_manager, kobj);
152 : int chars;
153 : unsigned val;
154 : chars = sscanf(buffer, "%u", &val);
155 : if (chars == 0)
156 : return size;
157 :
158 : /* Convert kb to number of pages */
159 : val = val / (PAGE_SIZE >> 10);
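		/* e.g. with 4 KiB pages PAGE_SIZE >> 10 == 4, so a value
		 * written as 1024 (KiB) becomes 256 pages. */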
160 :
161 : if (attr == &ttm_page_pool_max)
162 : m->options.max_size = val;
163 : else if (attr == &ttm_page_pool_small)
164 : m->options.small = val;
165 : else if (attr == &ttm_page_pool_alloc_size) {
166 : if (val > NUM_PAGES_TO_ALLOC*8) {
167 : pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
168 : NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
169 : NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
170 : return size;
171 : } else if (val > NUM_PAGES_TO_ALLOC) {
172 : pr_warn("Setting allocation size to larger than %lu is not recommended\n",
173 : NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
174 : }
175 : m->options.alloc_size = val;
176 : }
177 :
178 : return size;
179 : }
180 :
181 : static ssize_t ttm_pool_show(struct kobject *kobj,
182 : struct attribute *attr, char *buffer)
183 : {
184 : struct ttm_pool_manager *m =
185 : container_of(kobj, struct ttm_pool_manager, kobj);
186 : unsigned val = 0;
187 :
188 : if (attr == &ttm_page_pool_max)
189 : val = m->options.max_size;
190 : else if (attr == &ttm_page_pool_small)
191 : val = m->options.small;
192 : else if (attr == &ttm_page_pool_alloc_size)
193 : val = m->options.alloc_size;
194 :
195 : val = val * (PAGE_SIZE >> 10);
196 :
197 : return snprintf(buffer, PAGE_SIZE, "%u\n", val);
198 : }
199 :
200 : static const struct sysfs_ops ttm_pool_sysfs_ops = {
201 : .show = &ttm_pool_show,
202 : .store = &ttm_pool_store,
203 : };
204 : #endif
205 :
206 : static struct kobj_type ttm_pool_kobj_type = {
207 : .release = &ttm_pool_kobj_release,
208 : #ifdef __linux__
209 : .sysfs_ops = &ttm_pool_sysfs_ops,
210 : .default_attrs = ttm_pool_attrs,
211 : #endif
212 : };
213 :
214 : #ifndef PG_PMAP_WC
215 : #define PG_PMAP_WC PG_PMAP_UC
216 : #endif
217 :
218 : static struct ttm_pool_manager *_manager;
219 :
220 0 : static int set_pages_array_wb(struct vm_page **pages, int addrinarray)
221 : {
222 : #ifdef TTM_HAS_AGP
223 : #if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
224 : int i;
225 :
226 0 : for (i = 0; i < addrinarray; i++)
227 0 : atomic_clearbits_int(&pages[i]->pg_flags, PG_PMAP_WC);
228 : #else
229 : return -ENOSYS;
230 : #endif
231 : #endif
232 0 : return 0;
233 : }
234 :
235 0 : static int set_pages_array_wc(struct vm_page **pages, int addrinarray)
236 : {
237 : #ifdef TTM_HAS_AGP
238 : #if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
239 : int i;
240 :
241 0 : for (i = 0; i < addrinarray; i++)
242 0 : atomic_setbits_int(&pages[i]->pg_flags, PG_PMAP_WC);
243 : #else
244 : return -ENOSYS;
245 : #endif
246 : #endif
247 0 : return 0;
248 : }
249 :
250 0 : static int set_pages_array_uc(struct vm_page **pages, int addrinarray)
251 : {
252 : #ifdef TTM_HAS_AGP
253 0 : STUB();
254 0 : return -ENOSYS;
255 : #ifdef notyet
256 : int i;
257 :
258 : for (i = 0; i < addrinarray; i++)
259 : map_page_into_agp(pages[i]);
260 : #endif
261 : #endif
262 : return 0;
263 : }
264 :
265 : /**
266 : * Select the right pool for the requested caching state and ttm flags. */
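/* The pool index computed below selects from the pools[] union in
 * declaration order: 0 = wc, 1 = uc, 2 = wc dma32, 3 = uc dma32. */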
267 0 : static struct ttm_page_pool *ttm_get_pool(int flags,
268 : enum ttm_caching_state cstate)
269 : {
270 : int pool_index;
271 :
272 0 : if (cstate == tt_cached)
273 0 : return NULL;
274 :
275 0 : if (cstate == tt_wc)
276 0 : pool_index = 0x0;
277 : else
278 : pool_index = 0x1;
279 :
280 0 : if (flags & TTM_PAGE_FLAG_DMA32)
281 0 : pool_index |= 0x2;
282 :
283 0 : return &_manager->pools[pool_index];
284 0 : }
285 :
286 : /* set memory back to wb and free the pages. */
287 0 : static void ttm_pages_put(struct vm_page *pages[], unsigned npages)
288 : {
289 : unsigned i;
290 0 : if (set_pages_array_wb(pages, npages))
291 0 : pr_err("Failed to set %d pages to wb!\n", npages);
292 0 : for (i = 0; i < npages; ++i)
293 0 : __free_page(pages[i]);
294 0 : }
295 :
296 0 : static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
297 : unsigned freed_pages)
298 : {
299 0 : pool->npages -= freed_pages;
300 0 : pool->nfrees += freed_pages;
301 0 : }
302 :
303 : /**
304 : * Free pages from pool.
305 : *
306 : * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
307 : * pages in one go.
308 : *
309 : * @pool: to free the pages from
310 : * @nr_free: Number of pages to free; FREE_ALL_PAGES frees every page in the pool
311 : * @use_static: Safe to use static buffer
312 : **/
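/* Outline of the code below: pages are taken from the tail of pool->list
 * in batches of at most NUM_PAGES_TO_ALLOC; for each full batch the pool
 * lock is dropped while the caching state is reset and the pages are
 * freed, then the scan restarts.  The return value is the portion of
 * nr_free that could not be satisfied. */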
313 0 : static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
314 : bool use_static)
315 : {
316 : static struct vm_page *static_buf[NUM_PAGES_TO_ALLOC];
317 : unsigned long irq_flags;
318 : struct vm_page *p, *p1;
319 : struct vm_page **pages_to_free;
320 : unsigned freed_pages = 0,
321 : npages_to_free = nr_free;
322 : unsigned i;
323 :
324 0 : if (NUM_PAGES_TO_ALLOC < nr_free)
325 0 : npages_to_free = NUM_PAGES_TO_ALLOC;
326 :
327 0 : if (use_static)
328 0 : pages_to_free = static_buf;
329 : else
330 0 : pages_to_free = kmalloc(npages_to_free * sizeof(struct vm_page *),
331 : GFP_KERNEL);
332 0 : if (!pages_to_free) {
333 0 : pr_err("Failed to allocate memory for pool free operation\n");
334 0 : return 0;
335 : }
336 :
337 : restart:
338 0 : spin_lock_irqsave(&pool->lock, irq_flags);
339 :
340 0 : TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
341 0 : if (freed_pages >= npages_to_free)
342 : break;
343 :
344 0 : pages_to_free[freed_pages++] = p;
345 : /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
346 0 : if (freed_pages >= NUM_PAGES_TO_ALLOC) {
347 : /* remove range of pages from the pool */
348 0 : for (i = 0; i < freed_pages; i++)
349 0 : TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);
350 :
351 0 : ttm_pool_update_free_locked(pool, freed_pages);
352 : /**
353 : * Because changing page caching is costly
354 : * we unlock the pool to prevent stalling.
355 : */
356 0 : spin_unlock_irqrestore(&pool->lock, irq_flags);
357 :
358 0 : ttm_pages_put(pages_to_free, freed_pages);
359 0 : if (likely(nr_free != FREE_ALL_PAGES))
360 0 : nr_free -= freed_pages;
361 :
362 0 : if (NUM_PAGES_TO_ALLOC >= nr_free)
363 0 : npages_to_free = nr_free;
364 : else
365 : npages_to_free = NUM_PAGES_TO_ALLOC;
366 :
367 : freed_pages = 0;
368 :
369 : /* free all so restart the processing */
370 0 : if (nr_free)
371 0 : goto restart;
372 :
373 : /* We are not allowed to fall through or break here,
374 : * because the code that follows runs under the spinlock
375 : * while at this point we have already dropped it.
376 : */
377 : goto out;
378 :
379 : }
380 : }
381 :
382 : /* remove range of pages from the pool */
383 0 : if (freed_pages) {
384 0 : for (i = 0; i < freed_pages; i++)
385 0 : TAILQ_REMOVE(&pool->list, pages_to_free[i], pageq);
386 :
387 0 : ttm_pool_update_free_locked(pool, freed_pages);
388 0 : nr_free -= freed_pages;
389 0 : }
390 :
391 0 : spin_unlock_irqrestore(&pool->lock, irq_flags);
392 :
393 0 : if (freed_pages)
394 0 : ttm_pages_put(pages_to_free, freed_pages);
395 : out:
396 0 : if (pages_to_free != static_buf)
397 0 : kfree(pages_to_free);
398 0 : return nr_free;
399 0 : }
400 :
401 : /**
402 : * Callback for mm to request the pool to reduce the number of pages held.
403 : *
404 : * XXX: (dchinner) Deadlock warning!
405 : *
406 : * This code is crying out for a shrinker per pool....
407 : */
408 : #ifdef notyet
409 : static unsigned long
410 : ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
411 : {
412 : static DEFINE_MUTEX(lock);
413 : static unsigned start_pool;
414 : unsigned i;
415 : unsigned pool_offset;
416 : struct ttm_page_pool *pool;
417 : int shrink_pages = sc->nr_to_scan;
418 : unsigned long freed = 0;
419 :
420 : if (!mutex_trylock(&lock))
421 : return SHRINK_STOP;
422 : pool_offset = ++start_pool % NUM_POOLS;
423 : /* select start pool in round robin fashion */
424 : for (i = 0; i < NUM_POOLS; ++i) {
425 : unsigned nr_free = shrink_pages;
426 : if (shrink_pages == 0)
427 : break;
428 : pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
429 : /* OK to use static buffer since global mutex is held. */
430 : shrink_pages = ttm_page_pool_free(pool, nr_free, true);
431 : freed += nr_free - shrink_pages;
432 : }
433 : mutex_unlock(&lock);
434 : return freed;
435 : }
436 :
437 :
438 : static unsigned long
439 : ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
440 : {
441 : unsigned i;
442 : unsigned long count = 0;
443 :
444 : for (i = 0; i < NUM_POOLS; ++i)
445 : count += _manager->pools[i].npages;
446 :
447 : return count;
448 : }
449 : #endif
450 :
451 0 : static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
452 : {
453 : #ifdef notyet
454 : manager->mm_shrink.count_objects = ttm_pool_shrink_count;
455 : manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
456 : manager->mm_shrink.seeks = 1;
457 : register_shrinker(&manager->mm_shrink);
458 : #endif
459 0 : }
460 :
461 0 : static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
462 : {
463 0 : STUB();
464 : #ifdef notyet
465 : unregister_shrinker(&manager->mm_shrink);
466 : #endif
467 0 : }
468 :
469 0 : static int ttm_set_pages_caching(struct vm_page **pages,
470 : enum ttm_caching_state cstate, unsigned cpages)
471 : {
472 : int r = 0;
473 : /* Set page caching */
474 0 : switch (cstate) {
475 : case tt_uncached:
476 0 : r = set_pages_array_uc(pages, cpages);
477 0 : if (r)
478 0 : pr_err("Failed to set %d pages to uc!\n", cpages);
479 : break;
480 : case tt_wc:
481 0 : r = set_pages_array_wc(pages, cpages);
482 0 : if (r)
483 0 : pr_err("Failed to set %d pages to wc!\n", cpages);
484 : break;
485 : default:
486 : break;
487 : }
488 0 : return r;
489 : }
490 :
491 : /**
492 : * Free the pages that failed to change their caching state. If there are
493 : * any pages that have already changed their caching state, put them back
494 : * in the pool.
495 : */
496 0 : static void ttm_handle_caching_state_failure(struct pglist *pages,
497 : int ttm_flags, enum ttm_caching_state cstate,
498 : struct vm_page **failed_pages, unsigned cpages)
499 : {
500 : unsigned i;
501 : /* Failed pages have to be freed */
502 0 : for (i = 0; i < cpages; ++i) {
503 0 : TAILQ_REMOVE(pages, failed_pages[i], pageq);
504 0 : __free_page(failed_pages[i]);
505 : }
506 0 : }
507 :
508 : /**
509 : * Allocate new pages with correct caching.
510 : *
511 : * This function is reentrant if caller updates count depending on number of
512 : * pages returned in pages array.
513 : */
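/* Outline: up to max_cpages pages are gathered in caching_array, their
 * caching state is changed in one call to ttm_set_pages_caching(), and
 * each page is inserted at the head of the pages list. */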
514 0 : static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
515 : int ttm_flags, enum ttm_caching_state cstate, unsigned count)
516 : {
517 : struct vm_page **caching_array;
518 : struct vm_page *p;
519 : int r = 0;
520 : unsigned i, cpages;
521 0 : unsigned max_cpages = min(count,
522 : (unsigned)(PAGE_SIZE/sizeof(struct vm_page *)));
523 :
524 : /* allocate array for page caching change */
525 0 : caching_array = kmalloc(max_cpages*sizeof(struct vm_page *), GFP_KERNEL);
526 :
527 0 : if (!caching_array) {
528 0 : pr_err("Unable to allocate table for new pages\n");
529 0 : return -ENOMEM;
530 : }
531 :
532 0 : for (i = 0, cpages = 0; i < count; ++i) {
533 0 : p = alloc_page(gfp_flags);
534 :
535 0 : if (!p) {
536 0 : pr_err("Unable to get page %u\n", i);
537 :
538 : /* store already allocated pages in the pool after
539 : * setting the caching state */
540 0 : if (cpages) {
541 0 : r = ttm_set_pages_caching(caching_array,
542 : cstate, cpages);
543 0 : if (r)
544 0 : ttm_handle_caching_state_failure(pages,
545 : ttm_flags, cstate,
546 : caching_array, cpages);
547 : }
548 : r = -ENOMEM;
549 0 : goto out;
550 : }
551 :
552 : #ifdef CONFIG_HIGHMEM
553 : /* gfp flags of highmem page should never be dma32 so we
554 : * should be fine in such case
555 : */
556 : if (!PageHighMem(p))
557 : #endif
558 : {
559 0 : caching_array[cpages++] = p;
560 0 : if (cpages == max_cpages) {
561 :
562 0 : r = ttm_set_pages_caching(caching_array,
563 : cstate, cpages);
564 0 : if (r) {
565 0 : ttm_handle_caching_state_failure(pages,
566 : ttm_flags, cstate,
567 : caching_array, cpages);
568 0 : goto out;
569 : }
570 : cpages = 0;
571 0 : }
572 : }
573 :
574 0 : TAILQ_INSERT_HEAD(pages, p, pageq);
575 : }
576 :
577 0 : if (cpages) {
578 0 : r = ttm_set_pages_caching(caching_array, cstate, cpages);
579 0 : if (r)
580 0 : ttm_handle_caching_state_failure(pages,
581 : ttm_flags, cstate,
582 : caching_array, cpages);
583 : }
584 : out:
585 0 : kfree(caching_array);
586 :
587 0 : return r;
588 0 : }
589 :
590 : /**
591 : * Fill the given pool if there aren't enough pages and the requested number of
592 : * pages is small.
593 : */
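/* The refill only happens when the request is below
 * _manager->options.small and the pool holds fewer pages than requested;
 * it then adds options.alloc_size pages in one batch, with fill_lock
 * keeping out concurrent fills. */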
594 0 : static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
595 : int ttm_flags, enum ttm_caching_state cstate, unsigned count,
596 : unsigned long *irq_flags)
597 : {
598 : struct vm_page *p;
599 : int r;
600 : unsigned cpages = 0;
601 : /**
602 : * Only allow one pool fill operation at a time.
603 : * If the pool doesn't have enough pages for the allocation, new pages
604 : * are allocated from outside the pool.
605 : */
606 0 : if (pool->fill_lock)
607 0 : return;
608 :
609 0 : pool->fill_lock = true;
610 :
611 : /* If allocation request is small and there are not enough
612 : * pages in a pool we fill the pool up first. */
613 0 : if (count < _manager->options.small
614 0 : && count > pool->npages) {
615 0 : struct pglist new_pages;
616 0 : unsigned alloc_size = _manager->options.alloc_size;
617 :
618 : /**
619 : * Can't change page caching if in irqsave context. We have to
620 : * drop the pool->lock.
621 : */
622 0 : spin_unlock_irqrestore(&pool->lock, *irq_flags);
623 :
624 0 : TAILQ_INIT(&new_pages);
625 0 : r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
626 : cstate, alloc_size);
627 0 : spin_lock_irqsave(&pool->lock, *irq_flags);
628 :
629 0 : if (!r) {
630 0 : TAILQ_CONCAT(&pool->list, &new_pages, pageq);
631 0 : ++pool->nrefills;
632 0 : pool->npages += alloc_size;
633 0 : } else {
634 0 : pr_err("Failed to fill pool (%p)\n", pool);
635 : /* If we have any pages left put them to the pool. */
636 0 : TAILQ_FOREACH(p, &pool->list, pageq) {
637 0 : ++cpages;
638 : }
639 0 : TAILQ_CONCAT(&pool->list, &new_pages, pageq);
640 0 : pool->npages += cpages;
641 : }
642 :
643 0 : }
644 0 : pool->fill_lock = false;
645 0 : }
646 :
647 : /**
648 : * Cut 'count' number of pages from the pool and put them on the return list.
649 : *
650 : * @return count of pages still required to fulfill the request.
651 : */
652 0 : static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
653 : struct pglist *pages,
654 : int ttm_flags,
655 : enum ttm_caching_state cstate,
656 : unsigned count)
657 : {
658 0 : unsigned long irq_flags;
659 : vm_page_t p;
660 : unsigned i;
661 :
662 0 : spin_lock_irqsave(&pool->lock, irq_flags);
663 0 : ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
664 :
665 0 : if (count >= pool->npages) {
666 : /* take all pages from the pool */
667 0 : TAILQ_CONCAT(pages, &pool->list, pageq);
668 0 : count -= pool->npages;
669 0 : pool->npages = 0;
670 0 : goto out;
671 : }
672 : #ifdef __linux__
673 : /* find the last pages to include for requested number of pages. Split
674 : * pool to begin and halve it to reduce search space. */
675 : if (count <= pool->npages/2) {
676 : i = 0;
677 : list_for_each(p, &pool->list) {
678 : if (++i == count)
679 : break;
680 : }
681 : } else {
682 : i = pool->npages + 1;
683 : list_for_each_prev(p, &pool->list) {
684 : if (--i == count)
685 : break;
686 : }
687 : }
688 : /* Cut 'count' number of pages from the pool */
689 : list_cut_position(pages, &pool->list, p);
690 : #else
691 0 : for (i = 0; i < count; i++) {
692 0 : p = TAILQ_FIRST(&pool->list);
693 0 : TAILQ_REMOVE(&pool->list, p, pageq);
694 0 : TAILQ_INSERT_TAIL(pages, p, pageq);
695 : }
696 : #endif
697 0 : pool->npages -= count;
698 0 : count = 0;
699 : out:
700 0 : spin_unlock_irqrestore(&pool->lock, irq_flags);
701 0 : return count;
702 0 : }
703 :
704 : /* Put all pages in the pages list into the correct pool to wait for reuse */
705 0 : static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
706 : enum ttm_caching_state cstate)
707 : {
708 : unsigned long irq_flags;
709 0 : struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
710 : unsigned i;
711 :
712 0 : if (pool == NULL) {
713 : /* No pool for this memory type so free the pages */
714 0 : for (i = 0; i < npages; i++) {
715 0 : if (pages[i]) {
716 : #ifdef notyet
717 : if (page_count(pages[i]) != 1)
718 : pr_err("Erroneous page count. Leaking pages.\n");
719 : #endif
720 0 : __free_page(pages[i]);
721 0 : pages[i] = NULL;
722 0 : }
723 : }
724 0 : return;
725 : }
726 :
727 0 : spin_lock_irqsave(&pool->lock, irq_flags);
728 0 : for (i = 0; i < npages; i++) {
729 0 : if (pages[i]) {
730 : #ifdef notyet
731 : if (page_count(pages[i]) != 1)
732 : pr_err("Erroneous page count. Leaking pages.\n");
733 : #endif
734 0 : TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
735 0 : pages[i] = NULL;
736 0 : pool->npages++;
737 0 : }
738 : }
739 : /* Check that we don't go over the pool limit */
740 : npages = 0;
741 0 : if (pool->npages > _manager->options.max_size) {
742 0 : npages = pool->npages - _manager->options.max_size;
743 : /* free at least NUM_PAGES_TO_ALLOC number of pages
744 : * to reduce calls to set_memory_wb */
745 0 : if (npages < NUM_PAGES_TO_ALLOC)
746 : npages = NUM_PAGES_TO_ALLOC;
747 0 : }
748 0 : spin_unlock_irqrestore(&pool->lock, irq_flags);
749 0 : if (npages)
750 0 : ttm_page_pool_free(pool, npages, false);
751 0 : }
752 :
753 : /*
754 : * On success the pages array will hold count correctly
755 : * cached pages.
756 : */
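/* Outline: take what the pool can supply via ttm_page_pool_get_pages(),
 * zero those pages if TTM_PAGE_FLAG_ZERO_ALLOC was requested, then
 * allocate any shortfall with ttm_alloc_new_pages(); on failure the
 * pages gathered so far are handed back through ttm_put_pages(). */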
757 0 : static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags,
758 : enum ttm_caching_state cstate)
759 : {
760 0 : struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
761 0 : struct pglist plist;
762 : struct vm_page *p = NULL;
763 : gfp_t gfp_flags = GFP_USER;
764 : unsigned count;
765 : int r;
766 :
767 : /* set zero flag for page allocation if required */
768 0 : if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
769 0 : gfp_flags |= __GFP_ZERO;
770 :
771 : /* No pool for cached pages */
772 0 : if (pool == NULL) {
773 : if (flags & TTM_PAGE_FLAG_DMA32)
774 : gfp_flags |= GFP_DMA32;
775 : else
776 : gfp_flags |= GFP_HIGHUSER;
777 :
778 0 : for (r = 0; r < npages; ++r) {
779 0 : p = alloc_page(gfp_flags);
780 0 : if (!p) {
781 :
782 0 : pr_err("Unable to allocate page\n");
783 0 : return -ENOMEM;
784 : }
785 :
786 0 : pages[r] = p;
787 : }
788 0 : return 0;
789 : }
790 :
791 : /* combine zero flag to pool flags */
792 0 : gfp_flags |= pool->gfp_flags;
793 :
794 : /* First we take pages from the pool */
795 0 : TAILQ_INIT(&plist);
796 0 : npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
797 : count = 0;
798 0 : TAILQ_FOREACH(p, &plist, pageq) {
799 0 : pages[count++] = p;
800 : }
801 :
802 : /* clear the pages coming from the pool if requested */
803 0 : if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
804 : #ifdef __linux
805 : list_for_each_entry(p, &plist, lru) {
806 : if (PageHighMem(p))
807 : clear_highpage(p);
808 : else
809 : clear_page(page_address(p));
810 : }
811 : #else
812 0 : TAILQ_FOREACH(p, &plist, pageq) {
813 0 : pmap_zero_page(p);
814 : }
815 : #endif
816 : }
817 :
818 : /* If the pool didn't have enough pages, allocate new ones. */
819 0 : if (npages > 0) {
820 : /* ttm_alloc_new_pages doesn't reference pool so we can run
821 : * multiple requests in parallel.
822 : **/
823 0 : TAILQ_INIT(&plist);
824 0 : r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
825 0 : TAILQ_FOREACH(p, &plist, pageq) {
826 0 : pages[count++] = p;
827 : }
828 0 : if (r) {
829 : /* If there are any pages in the list, put them back in
830 : * the pool. */
831 0 : pr_err("Failed to allocate extra pages for large request\n");
832 0 : ttm_put_pages(pages, count, flags, cstate);
833 0 : return r;
834 : }
835 : }
836 :
837 0 : return 0;
838 0 : }
839 :
840 0 : static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
841 : char *name)
842 : {
843 0 : mtx_init(&pool->lock, IPL_TTY);
844 0 : pool->fill_lock = false;
845 0 : TAILQ_INIT(&pool->list);
846 0 : pool->npages = pool->nfrees = 0;
847 0 : pool->gfp_flags = flags;
848 0 : pool->name = name;
849 0 : }
850 :
851 0 : int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
852 : {
853 : int ret;
854 :
855 0 : WARN_ON(_manager);
856 :
857 : pr_info("Initializing pool allocator\n");
858 :
859 0 : _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
860 0 : if (!_manager)
861 0 : return -ENOMEM;
862 :
863 0 : ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
864 :
865 0 : ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
866 :
867 0 : ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
868 : GFP_USER | GFP_DMA32, "wc dma");
869 :
870 0 : ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
871 : GFP_USER | GFP_DMA32, "uc dma");
872 :
873 0 : _manager->options.max_size = max_pages;
874 0 : _manager->options.small = SMALL_ALLOCATION;
875 0 : _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
876 :
877 0 : ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
878 0 : &glob->kobj, "pool");
879 0 : if (unlikely(ret != 0)) {
880 0 : kobject_put(&_manager->kobj);
881 0 : _manager = NULL;
882 0 : return ret;
883 : }
884 :
885 0 : ttm_pool_mm_shrink_init(_manager);
886 :
887 0 : return 0;
888 0 : }
889 :
890 0 : void ttm_page_alloc_fini(void)
891 : {
892 : int i;
893 :
894 : pr_info("Finalizing pool allocator\n");
895 0 : ttm_pool_mm_shrink_fini(_manager);
896 :
897 : /* OK to use static buffer since global mutex is no longer used. */
898 0 : for (i = 0; i < NUM_POOLS; ++i)
899 0 : ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
900 :
901 0 : kobject_put(&_manager->kobj);
902 0 : _manager = NULL;
903 0 : }
904 :
905 0 : int ttm_pool_populate(struct ttm_tt *ttm)
906 : {
907 0 : struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
908 : unsigned i;
909 : int ret;
910 :
911 0 : if (ttm->state != tt_unpopulated)
912 0 : return 0;
913 :
914 0 : for (i = 0; i < ttm->num_pages; ++i) {
915 0 : ret = ttm_get_pages(&ttm->pages[i], 1,
916 0 : ttm->page_flags,
917 0 : ttm->caching_state);
918 0 : if (ret != 0) {
919 0 : ttm_pool_unpopulate(ttm);
920 0 : return -ENOMEM;
921 : }
922 :
923 0 : ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
924 : false, false);
925 0 : if (unlikely(ret != 0)) {
926 0 : ttm_pool_unpopulate(ttm);
927 0 : return -ENOMEM;
928 : }
929 : }
930 :
931 0 : if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
932 0 : ret = ttm_tt_swapin(ttm);
933 0 : if (unlikely(ret != 0)) {
934 0 : ttm_pool_unpopulate(ttm);
935 0 : return ret;
936 : }
937 : }
938 :
939 0 : ttm->state = tt_unbound;
940 0 : return 0;
941 0 : }
942 : EXPORT_SYMBOL(ttm_pool_populate);
943 :
944 0 : void ttm_pool_unpopulate(struct ttm_tt *ttm)
945 : {
946 : unsigned i;
947 :
948 0 : for (i = 0; i < ttm->num_pages; ++i) {
949 0 : if (ttm->pages[i]) {
950 0 : ttm_mem_global_free_page(ttm->glob->mem_glob,
951 : ttm->pages[i]);
952 0 : ttm_put_pages(&ttm->pages[i], 1,
953 0 : ttm->page_flags,
954 0 : ttm->caching_state);
955 0 : }
956 : }
957 0 : ttm->state = tt_unpopulated;
958 0 : }
959 : EXPORT_SYMBOL(ttm_pool_unpopulate);
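/* Example (a minimal sketch, not part of this file): a driver's ttm_tt
 * backend that does not use DMA pools would typically forward its
 * populate/unpopulate hooks straight to this allocator:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 *
 * The foo_ prefix is illustrative only. */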
960 :
961 0 : int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
962 : {
963 : struct ttm_page_pool *p;
964 : unsigned i;
965 : char *h[] = {"pool", "refills", "pages freed", "size"};
966 0 : if (!_manager) {
967 0 : seq_printf(m, "No pool allocator running.\n");
968 0 : return 0;
969 : }
970 0 : seq_printf(m, "%6s %12s %13s %8s\n",
971 : h[0], h[1], h[2], h[3]);
972 0 : for (i = 0; i < NUM_POOLS; ++i) {
973 0 : p = &_manager->pools[i];
974 :
975 0 : seq_printf(m, "%6s %12ld %13ld %8d\n",
976 0 : p->name, p->nrefills,
977 0 : p->nfrees, p->npages);
978 : }
979 0 : return 0;
980 0 : }
981 : EXPORT_SYMBOL(ttm_page_alloc_debugfs);