Line data Source code
1 : /**************************************************************************
2 : *
3 : * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 : * All Rights Reserved.
5 : *
6 : * Permission is hereby granted, free of charge, to any person obtaining a
7 : * copy of this software and associated documentation files (the
8 : * "Software"), to deal in the Software without restriction, including
9 : * without limitation the rights to use, copy, modify, merge, publish,
10 : * distribute, sub license, and/or sell copies of the Software, and to
11 : * permit persons to whom the Software is furnished to do so, subject to
12 : * the following conditions:
13 : *
14 : * The above copyright notice and this permission notice (including the
15 : * next paragraph) shall be included in all copies or substantial portions
16 : * of the Software.
17 : *
18 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 : * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 : * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 : * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 : * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 : * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 : *
26 : **************************************************************************/
27 :
28 : #define pr_fmt(fmt) "[TTM] " fmt
29 :
30 : #include <dev/pci/drm/drm_linux.h>
31 : #include <dev/pci/drm/ttm/ttm_memory.h>
32 : #include <dev/pci/drm/ttm/ttm_module.h>
33 : #include <dev/pci/drm/ttm/ttm_page_alloc.h>
34 :
35 : #define TTM_MEMORY_ALLOC_RETRIES 4
36 :
37 : struct ttm_mem_zone {
38 : struct kobject kobj;
39 : struct ttm_mem_global *glob;
40 : const char *name;
41 : uint64_t zone_mem;
42 : uint64_t emer_mem;
43 : uint64_t max_mem;
44 : uint64_t swap_limit;
45 : uint64_t used_mem;
46 : };
47 :
48 : #ifdef notyet
49 : static struct attribute ttm_mem_sys = {
50 : .name = "zone_memory",
51 : .mode = S_IRUGO
52 : };
53 : static struct attribute ttm_mem_emer = {
54 : .name = "emergency_memory",
55 : .mode = S_IRUGO | S_IWUSR
56 : };
57 : static struct attribute ttm_mem_max = {
58 : .name = "available_memory",
59 : .mode = S_IRUGO | S_IWUSR
60 : };
61 : static struct attribute ttm_mem_swap = {
62 : .name = "swap_limit",
63 : .mode = S_IRUGO | S_IWUSR
64 : };
65 : static struct attribute ttm_mem_used = {
66 : .name = "used_memory",
67 : .mode = S_IRUGO
68 : };
69 : #endif
70 :
71 0 : static void ttm_mem_zone_kobj_release(struct kobject *kobj)
72 : {
73 : struct ttm_mem_zone *zone =
74 0 : container_of(kobj, struct ttm_mem_zone, kobj);
75 :
76 : pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
77 : zone->name, (unsigned long long)zone->used_mem >> 10);
78 0 : kfree(zone);
79 0 : }
80 :
81 : #ifdef notyet
82 : static ssize_t ttm_mem_zone_show(struct kobject *kobj,
83 : struct attribute *attr,
84 : char *buffer)
85 : {
86 : struct ttm_mem_zone *zone =
87 : container_of(kobj, struct ttm_mem_zone, kobj);
88 : uint64_t val = 0;
89 :
90 : spin_lock(&zone->glob->lock);
91 : if (attr == &ttm_mem_sys)
92 : val = zone->zone_mem;
93 : else if (attr == &ttm_mem_emer)
94 : val = zone->emer_mem;
95 : else if (attr == &ttm_mem_max)
96 : val = zone->max_mem;
97 : else if (attr == &ttm_mem_swap)
98 : val = zone->swap_limit;
99 : else if (attr == &ttm_mem_used)
100 : val = zone->used_mem;
101 : spin_unlock(&zone->glob->lock);
102 :
103 : return snprintf(buffer, PAGE_SIZE, "%llu\n",
104 : (unsigned long long) val >> 10);
105 : }
106 :
107 : static void ttm_check_swapping(struct ttm_mem_global *glob);
108 :
109 : static ssize_t ttm_mem_zone_store(struct kobject *kobj,
110 : struct attribute *attr,
111 : const char *buffer,
112 : size_t size)
113 : {
114 : struct ttm_mem_zone *zone =
115 : container_of(kobj, struct ttm_mem_zone, kobj);
116 : int chars;
117 : unsigned long val;
118 : uint64_t val64;
119 :
120 : chars = sscanf(buffer, "%lu", &val);
121 : if (chars == 0)
122 : return size;
123 :
124 : val64 = val;
125 : val64 <<= 10;
126 :
127 : spin_lock(&zone->glob->lock);
128 : if (val64 > zone->zone_mem)
129 : val64 = zone->zone_mem;
130 : if (attr == &ttm_mem_emer) {
131 : zone->emer_mem = val64;
132 : if (zone->max_mem > val64)
133 : zone->max_mem = val64;
134 : } else if (attr == &ttm_mem_max) {
135 : zone->max_mem = val64;
136 : if (zone->emer_mem < val64)
137 : zone->emer_mem = val64;
138 : } else if (attr == &ttm_mem_swap)
139 : zone->swap_limit = val64;
140 : spin_unlock(&zone->glob->lock);
141 :
142 : ttm_check_swapping(zone->glob);
143 :
144 : return size;
145 : }
146 :
/* All per-zone attributes exported through sysfs (NULL-terminated). */
static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

/* Read/write hooks dispatching on the attribute pointers above. */
static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};
160 : #endif
161 :
/*
 * kobject type for per-zone objects.  The sysfs attribute plumbing is
 * Linux-only and compiled out on this platform; only the release
 * callback (which frees the zone) is active here.
 */
static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
#ifdef __linux__
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
#endif
};
169 :
170 0 : static void ttm_mem_global_kobj_release(struct kobject *kobj)
171 : {
172 : struct ttm_mem_global *glob =
173 0 : container_of(kobj, struct ttm_mem_global, kobj);
174 :
175 0 : kfree(glob);
176 0 : }
177 :
/* kobject type for the global accounting object; release frees it. */
static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};
181 :
182 0 : static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
183 : bool from_wq, uint64_t extra)
184 : {
185 : unsigned int i;
186 : struct ttm_mem_zone *zone;
187 : uint64_t target;
188 :
189 0 : for (i = 0; i < glob->num_zones; ++i) {
190 0 : zone = glob->zones[i];
191 :
192 0 : if (from_wq)
193 0 : target = zone->swap_limit;
194 0 : else if (capable(CAP_SYS_ADMIN))
195 0 : target = zone->emer_mem;
196 : else
197 0 : target = zone->max_mem;
198 :
199 0 : target = (extra > target) ? 0ULL : target;
200 :
201 0 : if (zone->used_mem > target)
202 0 : return true;
203 : }
204 0 : return false;
205 0 : }
206 :
/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

/*
 * ttm_shrink - drive the registered shrink callback until all zones are
 * back under the target selected by ttm_zones_above_swap_target(), or
 * the callback reports failure.
 *
 * @glob:    global memory accounting state.
 * @from_wq: true when invoked from the swap workqueue (selects the
 *           swap_limit target instead of max/emer limits).
 * @extra:   bytes still needed by the allocation that triggered us.
 *
 * glob->lock is dropped around each do_shrink() call (presumably the
 * callback can block — confirm against the shrink implementations) and
 * the zone state is re-evaluated after it is re-acquired.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	/* No shrink callback registered yet: nothing we can reclaim. */
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		/* Re-read under the lock each iteration. */
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		/* Callback made no progress; stop rather than spin. */
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}
235 :
236 :
237 :
238 0 : static void ttm_shrink_work(struct work_struct *work)
239 : {
240 : struct ttm_mem_global *glob =
241 0 : container_of(work, struct ttm_mem_global, work);
242 :
243 0 : ttm_shrink(glob, true, 0ULL);
244 0 : }
245 :
246 0 : static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
247 : uint64_t mem)
248 : {
249 0 : struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
250 : int ret;
251 :
252 0 : if (unlikely(!zone))
253 0 : return -ENOMEM;
254 :
255 0 : zone->name = "kernel";
256 0 : zone->zone_mem = mem;
257 0 : zone->max_mem = mem >> 1;
258 0 : zone->emer_mem = (mem >> 1) + (mem >> 2);
259 0 : zone->swap_limit = zone->max_mem - (mem >> 3);
260 0 : zone->used_mem = 0;
261 0 : zone->glob = glob;
262 0 : glob->zone_kernel = zone;
263 0 : ret = kobject_init_and_add(
264 0 : &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
265 0 : if (unlikely(ret != 0)) {
266 0 : kobject_put(&zone->kobj);
267 0 : return ret;
268 : }
269 0 : glob->zones[glob->num_zones++] = zone;
270 0 : return 0;
271 0 : }
272 :
273 : #ifdef CONFIG_HIGHMEM
/*
 * ttm_mem_init_highmem_zone - set up accounting for the "highmem" zone.
 *
 * FIXME(review): `si` below is not declared anywhere in this function or
 * visible in this file — upstream Linux passes a `const struct sysinfo
 * *si` parameter to this function.  As written this cannot compile if
 * CONFIG_HIGHMEM is ever defined; confirm against the upstream
 * signature before enabling that configuration.
 */
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     uint64_t mem)
{
	struct ttm_mem_zone *zone;
	int ret;

	/* No highmem present: nothing to account separately. */
	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	/* Same limit scheme as the kernel zone: max = mem/2, emer = 3*mem/4. */
	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		/* The kobject release callback frees the zone. */
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
305 : #else
306 0 : static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
307 : uint64_t mem)
308 : {
309 0 : struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
310 : int ret;
311 :
312 0 : if (unlikely(!zone))
313 0 : return -ENOMEM;
314 :
315 : /**
316 : * No special dma32 zone needed.
317 : */
318 :
319 0 : if (mem <= ((uint64_t) 1ULL << 32)) {
320 0 : kfree(zone);
321 0 : return 0;
322 : }
323 :
324 : /*
325 : * Limit max dma32 memory to 4GB for now
326 : * until we can figure out how big this
327 : * zone really is.
328 : */
329 :
330 : mem = ((uint64_t) 1ULL << 32);
331 0 : zone->name = "dma32";
332 0 : zone->zone_mem = mem;
333 0 : zone->max_mem = mem >> 1;
334 0 : zone->emer_mem = (mem >> 1) + (mem >> 2);
335 0 : zone->swap_limit = zone->max_mem - (mem >> 3);
336 0 : zone->used_mem = 0;
337 0 : zone->glob = glob;
338 0 : glob->zone_dma32 = zone;
339 0 : ret = kobject_init_and_add(
340 0 : &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
341 0 : if (unlikely(ret != 0)) {
342 0 : kobject_put(&zone->kobj);
343 0 : return ret;
344 : }
345 0 : glob->zones[glob->num_zones++] = zone;
346 0 : return 0;
347 0 : }
348 : #endif
349 :
350 0 : int ttm_mem_global_init(struct ttm_mem_global *glob)
351 : {
352 : uint64_t mem;
353 : int ret;
354 : int i;
355 : struct ttm_mem_zone *zone;
356 :
357 0 : mtx_init(&glob->lock, IPL_TTY);
358 0 : glob->swap_queue = create_singlethread_workqueue("ttm_swap");
359 0 : INIT_WORK(&glob->work, ttm_shrink_work);
360 0 : ret = kobject_init_and_add(
361 0 : &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
362 0 : if (unlikely(ret != 0)) {
363 0 : kobject_put(&glob->kobj);
364 0 : return ret;
365 : }
366 :
367 0 : mem = ptoa(physmem);
368 :
369 0 : ret = ttm_mem_init_kernel_zone(glob, mem);
370 0 : if (unlikely(ret != 0))
371 : goto out_no_zone;
372 : #ifdef CONFIG_HIGHMEM
373 : ret = ttm_mem_init_highmem_zone(glob, mem);
374 : if (unlikely(ret != 0))
375 : goto out_no_zone;
376 : #else
377 0 : ret = ttm_mem_init_dma32_zone(glob, mem);
378 0 : if (unlikely(ret != 0))
379 : goto out_no_zone;
380 : #endif
381 0 : for (i = 0; i < glob->num_zones; ++i) {
382 0 : zone = glob->zones[i];
383 : pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
384 : zone->name, (unsigned long long)zone->max_mem >> 10);
385 : }
386 0 : ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
387 0 : ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
388 0 : return 0;
389 : out_no_zone:
390 0 : ttm_mem_global_release(glob);
391 0 : return ret;
392 0 : }
393 : EXPORT_SYMBOL(ttm_mem_global_init);
394 :
/*
 * ttm_mem_global_release - tear down everything set up by
 * ttm_mem_global_init().
 *
 * Teardown order matters: the page allocators are stopped first so they
 * stop queueing shrink work, then the swap workqueue is flushed and
 * destroyed, and only then are the zone kobjects and the global kobject
 * dropped.  The kobject release callbacks free the zone and glob
 * structures themselves.
 */
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
416 :
417 0 : static void ttm_check_swapping(struct ttm_mem_global *glob)
418 : {
419 : bool needs_swapping = false;
420 : unsigned int i;
421 : struct ttm_mem_zone *zone;
422 :
423 0 : spin_lock(&glob->lock);
424 0 : for (i = 0; i < glob->num_zones; ++i) {
425 0 : zone = glob->zones[i];
426 0 : if (zone->used_mem > zone->swap_limit) {
427 : needs_swapping = true;
428 0 : break;
429 : }
430 : }
431 :
432 0 : spin_unlock(&glob->lock);
433 :
434 0 : if (unlikely(needs_swapping))
435 0 : (void)queue_work(glob->swap_queue, &glob->work);
436 :
437 0 : }
438 :
439 0 : static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
440 : struct ttm_mem_zone *single_zone,
441 : uint64_t amount)
442 : {
443 : unsigned int i;
444 : struct ttm_mem_zone *zone;
445 :
446 0 : spin_lock(&glob->lock);
447 0 : for (i = 0; i < glob->num_zones; ++i) {
448 0 : zone = glob->zones[i];
449 0 : if (single_zone && zone != single_zone)
450 : continue;
451 0 : zone->used_mem -= amount;
452 0 : }
453 0 : spin_unlock(&glob->lock);
454 0 : }
455 :
456 0 : void ttm_mem_global_free(struct ttm_mem_global *glob,
457 : uint64_t amount)
458 : {
459 0 : return ttm_mem_global_free_zone(glob, NULL, amount);
460 : }
461 : EXPORT_SYMBOL(ttm_mem_global_free);
462 :
/*
 * ttm_mem_global_reserve - check (and optionally commit) an accounting
 * reservation of @amount bytes.
 *
 * @single_zone: when non-NULL, only this zone is checked and charged;
 *               otherwise all zones are.
 * @amount:      number of bytes to reserve.
 * @reserve:     when true, commit by adding @amount to used_mem.
 *
 * Returns 0 on success, -ENOMEM if any relevant zone is already over
 * its limit.  CAP_SYS_ADMIN callers may go up to the emergency limit;
 * everyone else is capped at max_mem.  Note the limit check uses the
 * *current* used_mem, i.e. before @amount is added.
 */
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	/* First pass: verify every relevant zone has headroom. */
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	/* Second pass: commit the charge, still under the same lock. */
	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	/* The new usage may have crossed a swap limit. */
	ttm_check_swapping(glob);

	return ret;
}
501 :
502 :
503 0 : static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
504 : struct ttm_mem_zone *single_zone,
505 : uint64_t memory,
506 : bool no_wait, bool interruptible)
507 : {
508 : int count = TTM_MEMORY_ALLOC_RETRIES;
509 :
510 0 : while (unlikely(ttm_mem_global_reserve(glob,
511 : single_zone,
512 : memory, true)
513 : != 0)) {
514 0 : if (no_wait)
515 0 : return -ENOMEM;
516 0 : if (unlikely(count-- == 0))
517 0 : return -ENOMEM;
518 0 : ttm_shrink(glob, false, memory + (memory >> 2) + 16);
519 : }
520 :
521 0 : return 0;
522 0 : }
523 :
524 0 : int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
525 : bool no_wait, bool interruptible)
526 : {
527 : /**
528 : * Normal allocations of kernel memory are registered in
529 : * all zones.
530 : */
531 :
532 0 : return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
533 : interruptible);
534 : }
535 : EXPORT_SYMBOL(ttm_mem_global_alloc);
536 :
/*
 * ttm_mem_global_alloc_page - account one page (PAGE_SIZE bytes) for
 * @page.  Returns 0 or -ENOMEM via ttm_mem_global_alloc_zone().
 */
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct vm_page *page,
			      bool no_wait, bool interruptible)
{

	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/*
	 * pfn > 0x00100000 means the page lies above the 32-bit DMA
	 * boundary (4 GiB, assuming 4 KiB pages — confirm PAGE_SIZE),
	 * so it is charged only to the kernel zone, not dma32.
	 */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}
559 :
/*
 * ttm_mem_global_free_page - release the accounting done by
 * ttm_mem_global_alloc_page() for @page.  The zone selection must
 * mirror the alloc path exactly so the same zone is credited.
 */
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* Same boundary test as ttm_mem_global_alloc_page(). */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
573 :
574 :
575 0 : size_t ttm_round_pot(size_t size)
576 : {
577 0 : if ((size & (size - 1)) == 0)
578 0 : return size;
579 0 : else if (size > PAGE_SIZE)
580 0 : return PAGE_ALIGN(size);
581 : else {
582 : size_t tmp_size = 4;
583 :
584 0 : while (tmp_size < size)
585 0 : tmp_size <<= 1;
586 :
587 : return tmp_size;
588 : }
589 : return 0;
590 0 : }
591 : EXPORT_SYMBOL(ttm_round_pot);
|