/*	$OpenBSD: rthread_mutex.c,v 1.3 2017/08/15 07:06:29 guenther Exp $ */
/*
 * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"
#include "synch.h"

/*
 * States defined in "Futexes Are Tricky" 5.2
 */
enum {
	UNLOCKED = 0,
	LOCKED = 1,	/* locked without waiter */
	CONTENDED = 2,	/* threads waiting for this mutex */
};
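
/*
 * Lock state transitions:
 *
 *	lock:	UNLOCKED -> LOCKED	uncontended CAS fast path
 *		* -> CONTENDED		a waiter announces itself before
 *					sleeping in the kernel
 *	unlock:	LOCKED -> UNLOCKED	no waiter to wake
 *		CONTENDED -> UNLOCKED	wake one waiter; the woken thread
 *					restores CONTENDED since others
 *					may remain
 */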

#define SPIN_COUNT	128
#if defined(__i386__) || defined(__amd64__)
#define SPIN_WAIT()	asm volatile("pause": : : "memory")
#else
#define SPIN_WAIT()	do { } while (0)
#endif

static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
	pthread_mutex_t mutex;

	mutex = calloc(1, sizeof(*mutex));
	if (mutex == NULL)
		return (ENOMEM);

	if (attr == NULL) {
		mutex->type = PTHREAD_MUTEX_DEFAULT;
		mutex->prioceiling = -1;
	} else {
		mutex->type = (*attr)->ma_type;
		mutex->prioceiling = (*attr)->ma_protocol ==
		    PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
	}
	*mutexp = mutex;

	return (0);
}
DEF_STRONG(pthread_mutex_init);

int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
	pthread_mutex_t mutex;

	if (mutexp == NULL || *mutexp == NULL)
		return (EINVAL);

	mutex = *mutexp;
	if (mutex) {
		if (mutex->lock != UNLOCKED) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
			write(2, MSG, sizeof(MSG) - 1);
#undef MSG
			return (EBUSY);
		}
		free((void *)mutex);
		*mutexp = NULL;
	}

	return (0);
}
DEF_STRONG(pthread_mutex_destroy);

static int
_rthread_mutex_trylock(pthread_mutex_t mutex, int trywait,
    const struct timespec *abs)
{
	pthread_t self = pthread_self();

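	/*
	 * Fast path: if the mutex is unlocked, a single CAS takes it
	 * from UNLOCKED to LOCKED without entering the kernel.  The
	 * acquire barrier pairs with membar_exit_before_atomic() in
	 * pthread_mutex_unlock() so the critical section cannot be
	 * reordered before the acquisition.
	 */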
	if (atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED) == UNLOCKED) {
		membar_enter_after_atomic();
		mutex->owner = self;
		return (0);
	}

	if (mutex->owner == self) {
		int type = mutex->type;

		/* already owner? handle recursive behavior */
		if (type != PTHREAD_MUTEX_RECURSIVE) {
			if (trywait || type == PTHREAD_MUTEX_ERRORCHECK)
				return (trywait ? EBUSY : EDEADLK);

			/* self-deadlock is disallowed by strict */
			if (type == PTHREAD_MUTEX_STRICT_NP && abs == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (_twait(&mutex->type, type, CLOCK_REALTIME,
			    abs) != ETIMEDOUT)
				;
			return (ETIMEDOUT);
		} else {
			if (mutex->count == INT_MAX)
				return (EAGAIN);
			mutex->count++;
			return (0);
		}
	}

	return (EBUSY);
}

static int
_rthread_mutex_timedlock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abs, int timed)
{
	pthread_t self = pthread_self();
	pthread_mutex_t mutex;
	unsigned int i, lock;
	int error = 0;

	if (mutexp == NULL)
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * pthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			error = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (error != 0)
			return (error);
	}

	mutex = *mutexp;
	_rthread_debug(5, "%p: mutex_%slock %p (%p)\n", self,
	    (timed ? "timed" : (trywait ? "try" : "")), (void *)mutex,
	    (void *)mutex->owner);

	error = _rthread_mutex_trylock(mutex, trywait, abs);
	if (error != EBUSY || trywait)
		return (error);

	/* Try hard to not enter the kernel. */
	for (i = 0; i < SPIN_COUNT; i++) {
		if (mutex->lock == UNLOCKED)
			break;

		SPIN_WAIT();
	}

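	/*
	 * Spinning is done; try once more to take the mutex directly
	 * before announcing ourselves as a waiter.
	 */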
	lock = atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED);
	if (lock == UNLOCKED) {
		membar_enter_after_atomic();
		mutex->owner = self;
		return (0);
	}

	if (lock != CONTENDED) {
		/* Indicate that we're waiting on this mutex. */
		lock = atomic_swap_uint(&mutex->lock, CONTENDED);
	}

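	/*
	 * Sleep in the kernel as long as the mutex is held.  _twait()
	 * only blocks if mutex->lock still equals CONTENDED, so a
	 * release that slips in between the swap above and the wait
	 * is not missed.
	 */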
	while (lock != UNLOCKED) {
		error = _twait(&mutex->lock, CONTENDED, CLOCK_REALTIME, abs);
		if (error == ETIMEDOUT)
			return (error);
		/*
		 * We cannot know if there's another waiter, so in
		 * doubt set the state to CONTENDED.
		 */
		lock = atomic_swap_uint(&mutex->lock, CONTENDED);
	}

	membar_enter_after_atomic();
	mutex->owner = self;
	return (0);
}

int
pthread_mutex_trylock(pthread_mutex_t *mutexp)
{
	return (_rthread_mutex_timedlock(mutexp, 1, NULL, 0));
}

int
pthread_mutex_timedlock(pthread_mutex_t *mutexp, const struct timespec *abs)
{
	return (_rthread_mutex_timedlock(mutexp, 0, abs, 1));
}

int
pthread_mutex_lock(pthread_mutex_t *mutexp)
{
	return (_rthread_mutex_timedlock(mutexp, 0, NULL, 0));
}
DEF_STRONG(pthread_mutex_lock);

int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	pthread_mutex_t mutex;

	if (mutexp == NULL)
		return (EINVAL);

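	/*
	 * A statically initialized mutex that was never locked has not
	 * been allocated yet; unlocking it follows the undefined-behavior
	 * policy of the default mutex type.
	 */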
	if (*mutexp == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return (0);
#else
		abort();
#endif

	mutex = *mutexp;
	_rthread_debug(5, "%p: mutex_unlock %p (%p)\n", self, (void *)mutex,
	    (void *)mutex->owner);

	if (mutex->owner != self) {
		_rthread_debug(5, "%p: different owner %p (%p)\n", self,
		    (void *)mutex, (void *)mutex->owner);
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE) {
			return (EPERM);
		} else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error. All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

	if (mutex->type == PTHREAD_MUTEX_RECURSIVE) {
		if (mutex->count > 0) {
			mutex->count--;
			return (0);
		}
	}

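	/*
	 * Release: clear the owner, then drop the lock word.  If the
	 * decrement does not reach UNLOCKED the old value was
	 * CONTENDED, so force the word to UNLOCKED and wake one
	 * waiter.
	 */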
	mutex->owner = NULL;
	membar_exit_before_atomic();
	if (atomic_dec_int_nv(&mutex->lock) != UNLOCKED) {
		mutex->lock = UNLOCKED;
		_wake(&mutex->lock, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);