GCC Code Coverage Report

Directory:  ./
File:       lib/librthread/rthread_stack.c
Date:       2017-11-13

            Exec  Total  Coverage
Lines:        47     58    81.0 %
Branches:     22     34    64.7 %

Line      Exec  Source

   1            /* $OpenBSD: rthread_stack.c,v 1.17 2017/09/05 02:40:54 guenther Exp $ */
   2
   3            /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
   4
   5            #include <sys/param.h>
   6            #include <sys/mman.h>
   7
   8            #include <errno.h>
   9            #include <pthread.h>
  10            #include <stdint.h>
  11            #include <stdlib.h>
  12            #include <unistd.h>
  13
  14            #include "rthread.h"
  15
  16            /*
  17             * Follow uthread's example and keep around stacks that have default
  18             * attributes for possible reuse.
  19             */
  20            static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
  21            static _atomic_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
  22
  23            struct stack *
  24            _rthread_alloc_stack(pthread_t thread)
  25            {
  26                    struct stack *stack;
  27                    u_int32_t rnd;
  28                    caddr_t base;
  29                    caddr_t guard;
  30                    size_t size;
  31                    size_t guardsize;
  32
  33                    /* if the request uses the defaults, try to reuse one */
  34     19401         if (thread->attr.stack_addr == NULL &&
  35      6466             thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
  36      6463             thread->attr.guard_size == _thread_pagesize) {
  37      6463                 _spinlock(&def_stacks_lock);
  38      6463                 stack = SLIST_FIRST(&def_stacks);
  39      6463                 if (stack != NULL) {
  40      3072                         SLIST_REMOVE_HEAD(&def_stacks, link);
  41      3072                         _spinunlock(&def_stacks_lock);
  42      3072                         return (stack);
  43                            }
  44      3391                 _spinunlock(&def_stacks_lock);
  45      3391         }
  46
  47                    /* allocate the stack struct that we'll return */
  48      3397         stack = malloc(sizeof(*stack));
  49      3397         if (stack == NULL)
  50                            return (NULL);
  51
  52                    /* Smaller the stack, smaller the random bias */
  53      3397         if (thread->attr.stack_size > _thread_pagesize)
  54      3397                 rnd = arc4random() & (_thread_pagesize - 1);
  55                    else if (thread->attr.stack_size == _thread_pagesize)
  56                            rnd = arc4random() & (_thread_pagesize / 16 - 1);
  57                    else
  58                            rnd = 0;
  59      3397         rnd &= ~_STACKALIGNBYTES;
  60
  61                    /* If a stack address was provided, just fill in the details */
  62      3397         if (thread->attr.stack_addr != NULL) {
  63         3                 stack->base = base = thread->attr.stack_addr;
  64         3                 stack->len  = thread->attr.stack_size;
  65            #ifdef MACHINE_STACK_GROWS_UP
  66                            stack->sp = base + rnd;
  67            #else
  68         3                 stack->sp = base + thread->attr.stack_size - rnd;
  69            #endif
  70                            /*
  71                             * This impossible guardsize marks this stack as
  72                             * application allocated so it won't be freed or
  73                             * cached by _rthread_free_stack()
  74                             */
  75         3                 stack->guardsize = 1;
  76         3                 return (stack);
  77                    }
  78
  79                    /* round up the requested sizes up to full pages */
  80      3394         size = ROUND_TO_PAGE(thread->attr.stack_size);
  81      3394         guardsize = ROUND_TO_PAGE(thread->attr.guard_size);
  82
  83                    /* check for overflow */
  84      6788         if (size < thread->attr.stack_size ||
  85      3394             guardsize < thread->attr.guard_size ||
  86      3394             SIZE_MAX - size < guardsize) {
  87                            free(stack);
  88                            errno = EINVAL;
  89                            return (NULL);
  90                    }
  91      3394         size += guardsize;
  92
  93                    /* actually allocate the real stack */
  94      3394         base = mmap(NULL, size, PROT_READ | PROT_WRITE,
  95                        MAP_PRIVATE | MAP_ANON, -1, 0);
  96      3394         if (base == MAP_FAILED) {
  97                            free(stack);
  98                            return (NULL);
  99                    }
 100
 101            #ifdef MACHINE_STACK_GROWS_UP
 102                    guard = base + size - guardsize;
 103                    stack->sp = base + rnd;
 104            #else
 105                    guard = base;
 106      3394         stack->sp = base + size - rnd;
 107            #endif
 108
 109                    /* memory protect the guard region */
 110      6788         if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
 111                            munmap(base, size);
 112                            free(stack);
 113                            return (NULL);
 114                    }
 115
 116      3394         stack->base = base;
 117      3394         stack->guardsize = guardsize;
 118      3394         stack->len = size;
 119      3394         return (stack);
 120      6469 }
 121
 122            void
 123            _rthread_free_stack(struct stack *stack)
 124            {
 125     14592         if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
 126      4860             stack->guardsize == _thread_pagesize) {
 127      4860                 _spinlock(&def_stacks_lock);
 128      4860                 SLIST_INSERT_HEAD(&def_stacks, stack, link);
 129      4860                 _spinunlock(&def_stacks_lock);
 130      4860         } else {
 131                            /* unmap the storage unless it was application allocated */
 132         6                 if (stack->guardsize != 1)
 133         3                         munmap(stack->base, stack->len);
 134         6                 free(stack);
 135                    }
 136      4866 }
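
The random bias applied at lines 53-59 deserves a note: the offset comes from arc4random(), is capped below the page size (or a sixteenth of a page for a single-page stack, so small stacks lose less usable space), and is then masked so the resulting stack pointer stays suitably aligned. A minimal sketch of that masking step, assuming a hypothetical 4 KiB page and 16-byte alignment in place of _thread_pagesize and _STACKALIGNBYTES:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE        4096u  /* stand-in for _thread_pagesize */
#define STACK_ALIGN_MASK 15u    /* stand-in for _STACKALIGNBYTES */

int
main(void)
{
        /* arc4random(3) is what the OpenBSD code uses; any uniform
         * 32-bit source works for this sketch. */
        uint32_t rnd = arc4random();

        rnd &= PAGE_SIZE - 1;           /* bias of less than one page */
        rnd &= ~STACK_ALIGN_MASK;       /* keep the sp 16-byte aligned */

        /* On a downward-growing stack the sp starts rnd bytes below
         * the top, so each thread's stack begins at a slightly
         * different offset. */
        printf("stack bias: %u bytes\n", rnd);
        return 0;
}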
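
The overflow test at lines 84-86 is subtler than it looks: rounding a size near SIZE_MAX up to a page boundary wraps around, so a rounded result that is smaller than the original request is the signature of overflow, and writing the final check as SIZE_MAX - size < guardsize tests size + guardsize without the addition itself being able to wrap. A self-contained sketch of both checks, with a local round_to_page() standing in for OpenBSD's ROUND_TO_PAGE() macro and a hypothetical 4 KiB page:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL        /* hypothetical page size */

/* Local stand-in for OpenBSD's ROUND_TO_PAGE() macro. */
static size_t
round_to_page(size_t n)
{
        return ((n + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1));
}

int
main(void)
{
        size_t request = SIZE_MAX - 100;        /* pathological stack size */
        size_t size = round_to_page(request);
        size_t guardsize = PAGE_SIZE;

        /* Same tests as the allocator: a rounded value smaller than
         * the request means the rounding wrapped, and the subtraction
         * form checks size + guardsize without overflowing. */
        if (size < request || SIZE_MAX - size < guardsize)
                printf("overflow rejected (size wrapped to %zu)\n", size);
        return 0;
}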
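
The guard region set up at lines 94-114 follows the usual pattern: the stack and its guard are mapped as one readable, writable region, then all access to the guard pages is revoked with mprotect(), so an overflowing stack faults at the guard instead of silently corrupting whatever is mapped next. A stripped-down sketch of the same sequence for a downward-growing stack; alloc_guarded() is a hypothetical helper, not part of librthread:

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

/* Map a stack with one inaccessible guard page at its low end. */
static void *
alloc_guarded(size_t stacksize, size_t *lenp)
{
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = stacksize + pagesz;
        char *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);

        if (base == MAP_FAILED)
                return (NULL);
        /* Lowest page becomes the guard, as in the code above. */
        if (mprotect(base, pagesz, PROT_NONE) == -1) {
                munmap(base, len);
                return (NULL);
        }
        *lenp = len;
        return (base);
}

int
main(void)
{
        size_t len;
        void *base = alloc_guarded(64 * 1024, &len);

        if (base != NULL) {
                printf("mapped %zu bytes at %p, low page is the guard\n",
                    len, base);
                munmap(base, len);
        }
        return (0);
}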