GCC Code Coverage Report
Directory:  ./
File:       lib/librthread/rthread.c
Date:       2017-11-07

                 Exec    Total   Coverage
Lines:            191      257     74.3 %
Branches:          78      138     56.5 %

Line      Exec  Source
   1            /*	$OpenBSD: rthread.c,v 1.96 2017/09/05 02:40:54 guenther Exp $ */
   2            /*
   3             * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
   4             * All Rights Reserved.
   5             *
   6             * Permission to use, copy, modify, and distribute this software for any
   7             * purpose with or without fee is hereby granted, provided that the above
   8             * copyright notice and this permission notice appear in all copies.
   9             *
  10             * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11             * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12             * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13             * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14             * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15             * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16             * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17             */
  18            /*
  19             * The heart of rthreads.  Basic functions like creating and joining
  20             * threads.
  21             */
  22
  23            #include <sys/types.h>
  24            #ifndef NO_PIC
  25            #include <sys/exec_elf.h>
  26            #pragma weak _DYNAMIC
  27            #endif
  28
  29            #include <stdlib.h>
  30            #include <unistd.h>
  31            #include <signal.h>
  32            #include <stdio.h>
  33            #include <string.h>
  34            #include <errno.h>
  35            #include <dlfcn.h>
  36            #include <tib.h>
  37
  38            #include <pthread.h>
  39
  40            #include "cancel.h"		/* in libc/include */
  41            #include "rthread.h"
  42            #include "rthread_cb.h"
  43
  44            /*
  45             * Call nonstandard functions via names in the reserved namespace:
  46             *	dlctl() -> _dlctl()
  47             *	getthrid -> _thread_sys_getthrid
  48             */
  49            typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
  50            REDIRECT_SYSCALL(getthrid);
  51
  52            /* weak stub to be overridden by ld.so */
  53            int	dlctl(void *handle, int cmd, void *data) { return 0; }
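The two declarations on lines 49 and 53 combine a GCC/Clang asm label, which binds a C identifier to a different linker symbol, with a weak definition that any stronger definition elsewhere (here, ld.so's real _dlctl) overrides at link time. A self-contained sketch of the same pattern, using a hypothetical example_ctl() in place of dlctl():

	/* Bind the public identifier to the reserved-namespace symbol and mark
	 * it weak; the typeof() form in the real code reuses the prototype
	 * already provided by the system header. */
	int	example_ctl(void *handle, int cmd, void *data)
		    asm("_example_ctl") __attribute__((weak));

	/* Weak fallback, emitted as the symbol "_example_ctl"; a strong
	 * definition of _example_ctl in another object silently replaces it. */
	int
	example_ctl(void *handle, int cmd, void *data)
	{
		return (0);
	}
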
  54
  55            /*
  56             * libc's signal wrappers hide SIGTHR; we need to call the real syscall
  57             * stubs _thread_sys_* directly.
  58             */
  59            REDIRECT_SYSCALL(sigaction);
  60            REDIRECT_SYSCALL(sigprocmask);
  61            REDIRECT_SYSCALL(thrkill);
  62
  63            static int concurrency_level;	/* not used */
  64
  65            int _threads_ready;
  66            int _post_threaded;
  67            size_t _thread_pagesize;
  68            struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
  69            _atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
  70            static struct pthread_queue _thread_gc_list
  71                = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
  72            static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
  73            static struct pthread _initial_thread;
  74
  75            struct pthread_attr _rthread_attr_default = {
  76            	.stack_addr			= NULL,
  77            	.stack_size			= RTHREAD_STACK_SIZE_DEF,
  78            /*	.guard_size		set in _rthread_init */
  79            	.detach_state			= PTHREAD_CREATE_JOINABLE,
  80            	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
  81            	.sched_policy			= SCHED_OTHER,
  82            	.sched_param = { .sched_priority = 0 },
  83            	.sched_inherit			= PTHREAD_INHERIT_SCHED,
  84            };
  85
  86            /*
  87             * internal support functions
  88             */
  89
  90            static void
  91            _rthread_start(void *v)
  92            {
  93     10885  	pthread_t thread = v;
  94            	void *retval;
  95
  96            	retval = thread->fn(thread->arg);
  97            	pthread_exit(retval);
  98            }
  99
 100            static void
 101            sigthr_handler(__unused int sig)
 102            {
 103       126  	struct tib *tib = TIB_GET();
 104        63  	pthread_t self = tib->tib_thread;
 105
 106            	/*
 107            	 * Do nothing unless
 108            	 * 1) pthread_cancel() has been called on this thread,
 109            	 * 2) cancelation is enabled for it, and
 110            	 * 3) we're not already in cancelation processing
 111            	 */
 112       126  	if (!tib->tib_canceled || tib->tib_cantcancel)
 113            		return;
 114
 115            	/*
 116            	 * If delaying cancels inside complex ops (pthread_cond_wait,
 117            	 * pthread_join, etc), just mark that this has happened to
 118            	 * prevent a race with going to sleep
 119            	 */
 120        63  	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
 121            		self->delayed_cancel = 1;
 122            		return;
 123            	}
 124
 125            	/*
 126            	 * otherwise, if in a cancel point or async cancels are
 127            	 * enabled, then exit
 128            	 */
 129        75  	if (tib->tib_cancel_point ||
 130        12  	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
 131            		pthread_exit(PTHREAD_CANCELED);
 132        14  }
 133
 134
 135            /*
 136             * A few basic callbacks for libc.  The first couple are only used
 137             * on archs where there isn't a fast TCB_GET()
 138             */
 139            #ifndef TCB_HAVE_MD_GET
 140            static int *
 141            multi_threaded_errnoptr(void)
 142            {
 143                    return (&TIB_GET()->tib_errno);
 144            }
 145
 146            static void *
 147            multi_threaded_tcb(void)
 148            {
 149            	return (TCB_GET());
 150            }
 151            #endif /* TCB_HAVE_MD_GET */
 152
 153            static void
 154            _rthread_free(pthread_t thread)
 155            {
 156     16572  	_spinlock(&_thread_gc_lock);
 157      8286  	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
 158      8286  	_spinunlock(&_thread_gc_lock);
 159      8286  }
 160
 161            static void
 162            _thread_release(pthread_t thread)
 163            {
 164     21670  	_spinlock(&_thread_lock);
 165     32505  	LIST_REMOVE(thread, threads);
 166     10835  	_spinunlock(&_thread_lock);
 167
 168     10835  	_spinlock(&thread->flags_lock);
 169     10835  	if (thread->flags & THREAD_DETACHED) {
 170         9  		_spinunlock(&thread->flags_lock);
 171         9  		_rthread_free(thread);
 172         9  	} else {
 173     10826  		thread->flags |= THREAD_DONE;
 174     10826  		_spinunlock(&thread->flags_lock);
 175     10826  		_sem_post(&thread->donesem);
 176            	}
 177     10835  }
 178
 179            static void
 180            _thread_key_zero(int key)
 181            {
 182            	pthread_t thread;
 183            	struct rthread_storage *rs;
 184
 185        20  	LIST_FOREACH(thread, &_thread_list, threads) {
 186         8  		for (rs = thread->local_storage; rs; rs = rs->next) {
 187            			if (rs->keyid == key)
 188            				rs->data = NULL;
 189            		}
 190            	}
 191         4  }
 192
 193            void
 194            _rthread_init(void)
 195            {
 196      5740  	pthread_t thread = pthread_self();
 197      2870  	struct sigaction sa;
 198
 199      2870  	if (_threads_ready)
 200            		return;
 201
 202      5740  	LIST_INSERT_HEAD(&_thread_list, thread, threads);
 203
 204      2870  	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
 205      2870  	_rthread_attr_default.guard_size = _thread_pagesize;
 206      2870  	thread->attr = _rthread_attr_default;
 207
 208            	/* get libc to start using our callbacks */
 209            	{
 210      2870  		struct thread_callbacks cb = { 0 };
 211
 212            #ifndef TCB_HAVE_MD_GET
 213            		cb.tc_errnoptr		= multi_threaded_errnoptr;
 214            		cb.tc_tcb		= multi_threaded_tcb;
 215            #endif
 216      2870  		cb.tc_fork		= _thread_fork;
 217      2870  		cb.tc_vfork		= _thread_vfork;
 218      2870  		cb.tc_thread_release	= _thread_release;
 219      2870  		cb.tc_thread_key_zero	= _thread_key_zero;
 220      2870  		_thread_set_callbacks(&cb, sizeof(cb));
 221      2870  	}
 222
 223            #ifndef NO_PIC
 224      2870  	if (_DYNAMIC) {
 225      2870  		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
 226      2870  	}
 227            #endif
 228
 229            	/*
 230            	 * Set the handler on the signal used for cancelation and
 231            	 * suspension, and make sure it's unblocked
 232            	 */
 233      2870  	memset(&sa, 0, sizeof(sa));
 234      2870  	sigemptyset(&sa.sa_mask);
 235      2870  	sa.sa_handler = sigthr_handler;
 236      2870  	sigaction(SIGTHR, &sa, NULL);
 237      2870  	sigaddset(&sa.sa_mask, SIGTHR);
 238      2870  	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
 239
 240      2870  	_threads_ready = 1;
 241
 242      2870  	_malloc_init(1);
 243
 244      2870  	_rthread_debug(1, "rthread init\n");
 245      5740  }
 246
 247            static void
 248            _rthread_reaper(void)
 249            {
 250     38344  	pthread_t thread;
 251
 252            restart:
 253     27449  	_spinlock(&_thread_gc_lock);
 254     54898  	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
 255      8277  		if (thread->tib->tib_tid != 0)
 256            			continue;
 257     16554  		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
 258      8277  		_spinunlock(&_thread_gc_lock);
 259      8277  		if (thread != &_initial_thread) {
 260      8277  			_rthread_debug(3, "rthread reaping %p stack %p\n",
 261      8277  			    (void *)thread, (void *)thread->stack);
 262      8277  			_rthread_free_stack(thread->stack);
 263      8277  			_dl_free_tib(thread->tib, sizeof(*thread));
 264      8277  		} else {
 265            			/* initial thread isn't part of TIB allocation */
 266            			_rthread_debug(3, "rthread reaping %p (initial)\n",
 267            			    (void *)thread);
 268            			_dl_free_tib(thread->tib, 0);
 269            		}
 270            		goto restart;
 271            	}
 272     19172  	_spinunlock(&_thread_gc_lock);
 273     19172  }
 274
 275            /*
 276             * real pthread functions
 277             */
 278
 279            int
 280            pthread_join(pthread_t thread, void **retval)
 281            {
 282            	int e;
 283     16554  	struct tib *tib = TIB_GET();
 284            	pthread_t self;
 285      8277  	PREP_CANCEL_POINT(tib);
 286
 287      8277  	if (_post_threaded) {
 288            #define GREATSCOTT "great scott! serious repercussions on future events!\n"
 289            		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
 290            		abort();
 291            	}
 292      8277  	if (!_threads_ready)
 293            		_rthread_init();
 294      8277  	self = tib->tib_thread;
 295
 296            	e = 0;
 297     16554  	ENTER_DELAYED_CANCEL_POINT(tib, self);
 298      8277  	if (thread == NULL)
 299            		e = EINVAL;
 300      8277  	else if (thread == self)
 301            		e = EDEADLK;
 302      8277  	else if (thread->flags & THREAD_DETACHED)
 303            		e = EINVAL;
 304     16554  	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
 305      8277  	    &self->delayed_cancel)) == 0) {
 306      8277  		if (retval)
 307      5215  			*retval = thread->retval;
 308
 309            		/*
 310            		 * We should be the last having a ref to this thread,
 311            		 * but someone stupid or evil might have detached it;
 312            		 * in that case the thread will clean up itself
 313            		 */
 314      8277  		if ((thread->flags & THREAD_DETACHED) == 0)
 315      8277  			_rthread_free(thread);
 316            	}
 317
 318     16554  	LEAVE_CANCEL_POINT_INNER(tib, e);
 319      8277  	_rthread_reaper();
 320      8277  	return (e);
 321            }
 322
 323            int
 324            pthread_detach(pthread_t thread)
 325            {
 326            	int rc = 0;
 327
 328        16  	_spinlock(&thread->flags_lock);
 329         8  	if (thread->flags & THREAD_DETACHED) {
 330            		rc = EINVAL;
 331            		_spinunlock(&thread->flags_lock);
 332         8  	} else if (thread->flags & THREAD_DONE) {
 333            		_spinunlock(&thread->flags_lock);
 334            		_rthread_free(thread);
 335            	} else {
 336         8  		thread->flags |= THREAD_DETACHED;
 337         8  		_spinunlock(&thread->flags_lock);
 338            	}
 339         8  	_rthread_reaper();
 340         8  	return (rc);
 341            }
 342
 343            int
 344            pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
 345                void *(*start_routine)(void *), void *arg)
 346            {
 347            	extern int __isthreaded;
 348            	struct tib *tib;
 349            	pthread_t thread;
 350     21774  	struct __tfork param;
 351            	int rc;
 352
 353     10887  	if (!_threads_ready)
 354       222  		_rthread_init();
 355
 356     10887  	_rthread_reaper();
 357
 358     10887  	tib = _dl_allocate_tib(sizeof(*thread));
 359     10887  	if (tib == NULL)
 360            		return (ENOMEM);
 361     10887  	thread = tib->tib_thread;
 362     10887  	memset(thread, 0, sizeof(*thread));
 363     10887  	thread->tib = tib;
 364     10887  	thread->donesem.lock = _SPINLOCK_UNLOCKED;
 365     10887  	thread->flags_lock = _SPINLOCK_UNLOCKED;
 366     10887  	thread->fn = start_routine;
 367     10887  	thread->arg = arg;
 368     10887  	tib->tib_tid = -1;
 369
 370     21774  	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
 371     10887  	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
 372     10887  		pthread_t self = pthread_self();
 373
 374     10887  		thread->attr.sched_policy = self->attr.sched_policy;
 375     10887  		thread->attr.sched_param = self->attr.sched_param;
 376     10887  	}
 377     10887  	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
 378         5  		thread->flags |= THREAD_DETACHED;
 379
 380     10887  	thread->stack = _rthread_alloc_stack(thread);
 381     10887  	if (!thread->stack) {
 382            		rc = errno;
 383            		goto fail1;
 384            	}
 385
 386     10887  	param.tf_tcb = TIB_TO_TCB(tib);
 387     10887  	param.tf_tid = &tib->tib_tid;
 388     10887  	param.tf_stack = thread->stack->sp;
 389
 390     10887  	_spinlock(&_thread_lock);
 391     32661  	LIST_INSERT_HEAD(&_thread_list, thread, threads);
 392     10887  	_spinunlock(&_thread_lock);
 393
 394            	/* we're going to be multi-threaded real soon now */
 395     10887  	__isthreaded = 1;
 396     10887  	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
 397     10887  	if (rc != -1) {
 398            		/* success */
 399     10887  		*threadp = thread;
 400     10887  		return (0);
 401            	}
 402
 403            	rc = errno;
 404
 405            	_spinlock(&_thread_lock);
 406            	LIST_REMOVE(thread, threads);
 407            	_spinunlock(&_thread_lock);
 408            	_rthread_free_stack(thread->stack);
 409            fail1:
 410            	_dl_free_tib(tib, sizeof(*thread));
 411
 412            	return (rc);
 413     10887  }
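For reference, a minimal caller of the create/join path counted above (a hypothetical stand-alone program, not part of librthread):

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static int answer = 42;

	/* hypothetical worker: hands its argument back to the joiner */
	static void *
	worker(void *arg)
	{
		return (arg);
	}

	int
	main(void)
	{
		pthread_t t;
		void *ret;
		int e;

		/* a NULL attribute pointer selects _rthread_attr_default */
		if ((e = pthread_create(&t, NULL, worker, &answer)) != 0) {
			fprintf(stderr, "pthread_create: %s\n", strerror(e));
			return (1);
		}

		/* blocks on the thread's donesem until _thread_release() posts it */
		if ((e = pthread_join(t, &ret)) != 0) {
			fprintf(stderr, "pthread_join: %s\n", strerror(e));
			return (1);
		}
		printf("worker returned %d\n", *(int *)ret);
		return (0);
	}
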
 414
 415            int
 416            pthread_kill(pthread_t thread, int sig)
 417            {
 418       256  	struct tib *tib = thread->tib;
 419
 420       128  	if (sig == SIGTHR)
 421            		return (EINVAL);
 422       128  	if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
 423            		return (errno);
 424       128  	return (0);
 425       128  }
 426
 427            int
 428            pthread_cancel(pthread_t thread)
 429            {
 430       138  	struct tib *tib = thread->tib;
 431        69  	pid_t tid = tib->tib_tid;
 432
 433       137  	if (tib->tib_canceled == 0 && tid != 0 &&
 434        68  	    (tib->tib_cantcancel & CANCEL_DYING) == 0) {
 435        68  		tib->tib_canceled = 1;
 436
 437        68  		if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
 438        63  			thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
 439        63  			return (0);
 440            		}
 441            	}
 442         6  	return (0);
 443        64  }
 444
 445            void
 446            pthread_testcancel(void)
 447            {
 448        40  	struct tib *tib = TIB_GET();
 449
 450        35  	if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
 451            		pthread_exit(PTHREAD_CANCELED);
 452        10  }
 453
 454            int
 455            pthread_setcancelstate(int state, int *oldstatep)
 456            {
 457       220  	struct tib *tib = TIB_GET();
 458            	int oldstate;
 459
 460       110  	oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
 461            	    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
 462       110  	if (state == PTHREAD_CANCEL_ENABLE) {
 463        55  		tib->tib_cantcancel &= ~CANCEL_DISABLED;
 464       110  	} else if (state == PTHREAD_CANCEL_DISABLE) {
 465        55  		tib->tib_cantcancel |= CANCEL_DISABLED;
 466            	} else {
 467            		return (EINVAL);
 468            	}
 469       110  	if (oldstatep)
 470        50  		*oldstatep = oldstate;
 471
 472       110  	return (0);
 473       110  }
 474            DEF_STD(pthread_setcancelstate);
 475
 476            int
 477            pthread_setcanceltype(int type, int *oldtypep)
 478            {
 479        30  	struct tib *tib = TIB_GET();
 480            	int oldtype;
 481
 482        15  	oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
 483            	    PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
 484        15  	if (type == PTHREAD_CANCEL_DEFERRED) {
 485         5  		tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
 486        15  	} else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
 487        10  		tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
 488            	} else {
 489            		return (EINVAL);
 490            	}
 491        15  	if (oldtypep)
 492            		*oldtypep = oldtype;
 493
 494        15  	return (0);
 495        15  }
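pthread_cancel(), sigthr_handler(), pthread_testcancel() and the two setters above cooperate to give deferred cancellation. A hypothetical target thread that shields a critical region and then polls for a pending cancel could look like this:

	#include <pthread.h>
	#include <unistd.h>

	static void *
	cancellable_worker(void *arg)
	{
		int oldstate;

		(void)arg;
		for (;;) {
			/* shield a region that must not be interrupted */
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
			/* ... critical work; while disabled, pthread_cancel()
			 * only sets tib_canceled and sends no SIGTHR ... */
			pthread_setcancelstate(oldstate, NULL);

			/* explicit cancellation point: exits with PTHREAD_CANCELED
			 * if a cancel arrived while the state was disabled */
			pthread_testcancel();

			sleep(1);	/* sleep() is itself a cancellation point */
		}
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t t;
		void *ret;

		pthread_create(&t, NULL, cancellable_worker, NULL);
		sleep(2);
		pthread_cancel(t);	/* sends SIGTHR unless cancellation is disabled */
		pthread_join(t, &ret);
		return (ret == PTHREAD_CANCELED ? 0 : 1);
	}
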
 496
 497            void
 498            pthread_cleanup_push(void (*fn)(void *), void *arg)
 499            {
 500            	struct rthread_cleanup_fn *clfn;
 501        40  	pthread_t self = pthread_self();
 502
 503        20  	clfn = calloc(1, sizeof(*clfn));
 504        20  	if (!clfn)
 505            		return;
 506        20  	clfn->fn = fn;
 507        20  	clfn->arg = arg;
 508        20  	clfn->next = self->cleanup_fns;
 509        20  	self->cleanup_fns = clfn;
 510        40  }
 511
 512            void
 513            pthread_cleanup_pop(int execute)
 514            {
 515            	struct rthread_cleanup_fn *clfn;
 516            	pthread_t self = pthread_self();
 517
 518            	clfn = self->cleanup_fns;
 519            	if (clfn) {
 520            		self->cleanup_fns = clfn->next;
 521            		if (execute)
 522            			clfn->fn(clfn->arg);
 523            		free(clfn);
 524            	}
 525            }
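pthread_cleanup_push()/pthread_cleanup_pop() above keep a per-thread stack of handlers that cancellation and pthread_exit() unwind. A typical hypothetical use releases a mutex even if the thread is cancelled while waiting:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int ready;

	/* cleanup handler: runs on pop(1) and when the thread is cancelled or exits */
	static void
	unlock_mutex(void *arg)
	{
		pthread_mutex_unlock(arg);
	}

	static void *
	waiter(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		pthread_cleanup_push(unlock_mutex, &lock);
		while (!ready)
			pthread_cond_wait(&cond, &lock);	/* cancellation point */
		pthread_cleanup_pop(1);				/* pop and run: unlocks */
		return (NULL);
	}

On systems where push and pop are macros they must appear as a pair in the same block, which the sketch respects; here they are ordinary functions, so either form works.
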
 526
 527            int
 528            pthread_getconcurrency(void)
 529            {
 530            	return (concurrency_level);
 531            }
 532
 533            int
 534            pthread_setconcurrency(int new_level)
 535            {
 536            	if (new_level < 0)
 537            		return (EINVAL);
 538            	concurrency_level = new_level;
 539            	return (0);
 540            }
 541
 542            /*
 543             * compat debug stuff
 544             */
 545            void
 546            _thread_dump_info(void)
 547            {
 548            	pthread_t thread;
 549
 550            	_spinlock(&_thread_lock);
 551            	LIST_FOREACH(thread, &_thread_list, threads)
 552            		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
 553            		    thread->tib->tib_thread_flags, thread->name);
 554            	_spinunlock(&_thread_lock);
 555            }
 556
 557            #ifndef NO_PIC
 558            /*
 559             * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 560             * the function called via atexit() to invoke all destructors.  The latter
 561             * two call shared-object destructors, which may need to call dlclose(),
 562             * so this lock needs to permit recursive locking.
 563             * The specific code here was extracted from _rthread_mutex_lock() and
 564             * pthread_mutex_unlock() and simplified to use the static variables.
 565             */
 566            void
 567            _rthread_dl_lock(int what)
 568            {
 569            	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
 570            	static pthread_t owner = NULL;
 571            	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
 572            	static int count = 0;
 573
 574   1125570  	if (what == 0) {
 575    282827  		pthread_t self = pthread_self();
 576
 577            		/* lock, possibly recursive */
 578    282827  		_spinlock(&lock);
 579    282827  		if (owner == NULL) {
 580     72127  			owner = self;
 581    282827  		} else if (owner != self) {
 582            			TAILQ_INSERT_TAIL(&lockers, self, waiting);
 583            			while (owner != self) {
 584            				__thrsleep(self, 0, NULL, &lock, NULL);
 585            				_spinlock(&lock);
 586            			}
 587            		}
 588    282827  		count++;
 589    282827  		_spinunlock(&lock);
 590    562785  	} else if (what == 1) {
 591            		/* unlock, possibly recursive */
 592    279958  		if (--count == 0) {
 593            			pthread_t next;
 594
 595     69258  			_spinlock(&lock);
 596     69258  			owner = next = TAILQ_FIRST(&lockers);
 597     69258  			if (next != NULL)
 598            				TAILQ_REMOVE(&lockers, next, waiting);
 599     69258  			_spinunlock(&lock);
 600     69258  			if (next != NULL)
 601            				__thrwakeup(next, 1);
 602     69258  		}
 603            	} else {
 604            		/* reinit: used in child after fork to clear the queue */
 605            		lock = _SPINLOCK_UNLOCKED;
 606            		if (--count == 0)
 607            			owner = NULL;
 608            		TAILQ_INIT(&lockers);
 609            	}
 610    562785  }
 611            #endif
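
_rthread_dl_lock() builds recursive-acquire semantics by hand from a spinlock, an owner pointer, a waiter queue and a depth count. For comparison, the same lock/unlock behaviour expressed with a portable recursive mutex (an illustration with hypothetical names, not how ld.so actually obtains its lock):

	#include <pthread.h>

	static pthread_mutex_t dl_mutex;
	static pthread_once_t dl_once = PTHREAD_ONCE_INIT;

	static void
	dl_mutex_init(void)
	{
		pthread_mutexattr_t attr;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
		pthread_mutex_init(&dl_mutex, &attr);
		pthread_mutexattr_destroy(&attr);
	}

	/* what == 0: lock (recursion allowed), what == 1: unlock.  The real
	 * function also accepts what == 2 (reinit in the child after fork),
	 * which a plain mutex has no direct equivalent for. */
	static void
	dl_lock(int what)
	{
		pthread_once(&dl_once, dl_mutex_init);
		if (what == 0)
			pthread_mutex_lock(&dl_mutex);
		else if (what == 1)
			pthread_mutex_unlock(&dl_mutex);
	}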