/* $OpenBSD: malloc.c,v 1.236 2017/11/02 14:01:50 otto Exp $ */
/*
 * Copyright (c) 2008, 2010, 2011, 2016 Otto Moerbeek <otto@drijf.net>
 * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@openbsd.org>
 * Copyright (c) 2000 Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

/* #define MALLOC_STATS */

#include <sys/types.h>
#include <sys/param.h>	/* PAGE_SHIFT ALIGN */
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <fcntl.h>
#endif

#include "thread_private.h"
#include <tib.h>

#if defined(__mips64__)
#define MALLOC_PAGESHIFT	(14U)
#else
#define MALLOC_PAGESHIFT	(PAGE_SHIFT)
#endif

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#define MALLOC_INITIAL_REGIONS	512
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard < \
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) + \
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY - \
				    ((sz) - mopts.malloc_guard)) & \
				    ~(MALLOC_MINSIZE - 1)))

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk. This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK	0xdb	/* deadbeef */
#define SOME_FREEJUNK	0xdf	/* dead, free */

#define MMAP(sz)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MMAPA(a,sz)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE, -1, 0)

#define MQUERY(a, sz)	mquery((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0)
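
/*
 * Added note: these wrappers fix the protection and flags so every call
 * site creates anonymous, private, read-write mappings the same way.
 * MQUERY uses mquery(2), which checks whether a mapping could be placed
 * at the given hint address without actually creating it; the cheap
 * realloc path in orealloc() below relies on this to probe for free
 * space directly after an existing region.
 */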

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void *f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[MALLOC_MAXSHIFT + 1];
	/* lists of chunks with free slots */
	struct chunk_head chunk_dir[MALLOC_MAXSHIFT + 1][MALLOC_CHUNK_LISTS];
	size_t free_regions_size;	/* free pages cached */
	/* free pages cache */
	struct region_info free_regions[MALLOC_MAXCACHE];
	/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	size_t rbytesused;		/* random bytes used */
	char *func;			/* current function */
	int mutex;
	u_char rbytes[32];		/* random bytes */
	u_short chunk_start;
#ifdef MALLOC_STATS
	size_t inserts;
	size_t insert_collisions;
	size_t finds;
	size_t find_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};
#define DIR_INFO_RSZ	((sizeof(struct dir_info) + MALLOC_PAGEMASK) & \
			    ~MALLOC_PAGEMASK)

/* How many bits per u_short in the bitmap */
#define MALLOC_BITS	(NBBY * sizeof(u_short))

/* This structure describes a page worth of chunks. */
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;		/* pointer to the page */
	u_int32_t canary;
	u_short size;		/* size of this page's chunks */
	u_short shift;		/* how far to shift for this size */
	u_short free;		/* how many free chunks */
	u_short total;		/* how many chunks */
	u_short offset;		/* requested size table offset */
	/* which chunks are free */
	u_short bits[1];
};

struct malloc_readonly {
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];	/* Main bookkeeping information */
	int malloc_mt;			/* multi-threaded mode? */
	int malloc_freecheck;		/* Extensive double free check */
	int malloc_freeunmap;		/* mprotect free pages PROT_NONE? */
	int malloc_junk;		/* junk fill? */
	int malloc_realloc;		/* always realloc? */
	int malloc_xmalloc;		/* xmalloc behaviour? */
	int chunk_canaries;		/* use canaries after chunks? */
	int internal_funcs;		/* use better recallocarray/freezero? */
	u_int malloc_cache;		/* free pages we cache */
	size_t malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int malloc_stats;		/* dump statistics at end */
#endif
	u_int32_t malloc_canary;	/* Matched against ones in malloc_pool */
};

/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)));
#define mopts	malloc_readonly.mopts

char *malloc_options;	/* compile-time options */

static u_char getrbyte(struct dir_info *d);
static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));
static void fill_canary(char *ptr, size_t sz, size_t allocated);

#ifdef MALLOC_STATS
void malloc_dump(int, int, struct dir_info *);
PROTO_NORMAL(malloc_dump);
void malloc_gdump(int);
PROTO_NORMAL(malloc_gdump);
static void malloc_exit(void);
#define CALLER	__builtin_return_address(0)
#else
#define CALLER	NULL
#endif

/*
 * The low bits of r->p determine the size: 0 means >= page size, with
 * r->size holding the real size; otherwise the low bits are a shift
 * count, or 1 for malloc(0).
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : ((sz) == 1 ? 0 : (1 << ((sz)-1))))
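
/*
 * Example of the encoding: a page of 32-byte chunks (shift count 5) is
 * entered into the region table with low pointer bits 5 + 1 = 6, and
 * REALSIZE() recovers 1 << (6 - 1) = 32.  See omalloc_make_chunks(),
 * which stores (uintptr_t)pp | (bits + 1).
 */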

static inline void
_MALLOC_LEAVE(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		d->active--;
		_MALLOC_UNLOCK(d->mutex);
	}
}

static inline void
_MALLOC_ENTER(struct dir_info *d)
{
	if (mopts.malloc_mt) {
		_MALLOC_LOCK(d->mutex);
		d->active++;
	}
}

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
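
/*
 * The page number is mixed by repeated multiply-by-127 steps
 * ((sum << 7) - sum) that fold in ever higher bits of the address.
 * insert(), find() and delete() below mask the result with
 * regions_total - 1, which is why the region table is kept at a
 * power-of-two size.
 */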

static inline
struct dir_info *getpool(void)
{
	if (!mopts.malloc_mt)
		return mopts.malloc_pool[0];
	else
		return mopts.malloc_pool[TIB_GET()->tib_tid &
		    (_MALLOC_MUTEXES - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	int saved_errno = errno;
	va_list ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

#ifdef MALLOC_STATS
	if (mopts.malloc_stats)
		malloc_gdump(STDERR_FILENO);
#endif /* MALLOC_STATS */

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}
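
/*
 * getrbyte() serves random bytes from a small arc4random-filled pool,
 * refilling it when it runs dry; rbytes_init() also skips a random
 * prefix of the buffer so consumption does not start at a predictable
 * offset.  These bytes drive the randomised cache index, chunk-slot
 * choice and delayed-free slot selection below.
 */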

/*
 * Cache maintenance.  We keep at most malloc_cache pages cached.
 * If the cache is becoming full, unmap pages in the cache for real,
 * and then add the region to the cache.
 * Unlike the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
static void
unmap(struct dir_info *d, void *p, size_t sz, int clear)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	size_t rsz, tounmap;
	struct region_info *r;
	u_int i, offset;

	if (sz != PAGEROUND(sz))
		wrterror(d, "munmap round");

	rsz = mopts.malloc_cache - d->free_regions_size;

	/*
	 * normally the cache holds recently freed regions, but if the region
	 * to unmap is larger than the cache size or we're clearing and the
	 * cache is full, just munmap
	 */
	if (psz > mopts.malloc_cache || (clear && rsz == 0)) {
		i = munmap(p, sz);
		if (i)
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	tounmap = 0;
	if (psz > rsz)
		tounmap = psz - rsz;
	offset = getrbyte(d);
	for (i = 0; tounmap > 0 && i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			if (tounmap > r->size)
				tounmap -= r->size;
			else
				tounmap = 0;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
	if (tounmap > 0)
		wrterror(d, "malloc cache underflow");
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p == NULL) {
			if (clear)
				memset(p, 0, sz - mopts.malloc_guard);
			if (mopts.malloc_junk && !mopts.malloc_freeunmap) {
				size_t amt = mopts.malloc_junk == 1 ?
				    MALLOC_MAXCHUNK : sz;
				memset(p, SOME_FREEJUNK, amt);
			}
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_NONE);
			r->p = p;
			r->size = psz;
			d->free_regions_size += psz;
			break;
		}
	}
	if (i == mopts.malloc_cache)
		wrterror(d, "malloc free slot lost");
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache overflow");
}
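
/*
 * Evict every cached region that starts inside [p, p + len] so the
 * address range can be handed back to the kernel; used by the cheap
 * realloc path in orealloc() before it tries to map pages directly
 * after an existing allocation.
 */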

static void
zapcacheregion(struct dir_info *d, void *p, size_t len)
{
	u_int i;
	struct region_info *r;
	size_t rsz;

	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[i];
		if (r->p >= p && r->p <= (void *)((char *)p + len)) {
			rsz = r->size << MALLOC_PAGESHIFT;
			if (munmap(r->p, rsz))
				wrterror(d, "munmap %p", r->p);
			r->p = NULL;
			d->free_regions_size -= r->size;
			r->size = 0;
			STATS_SUB(d->malloc_used, rsz);
		}
	}
}
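
/*
 * map() returns sz bytes (a multiple of MALLOC_PAGESIZE) for a region.
 * A random starting point in the page cache is probed first: an exact
 * size match is taken immediately, otherwise the request is carved off
 * the front of a bigger cached region.  When hint is non-NULL only a
 * cached region at exactly that address will do, and MAP_FAILED is
 * returned rather than falling back to mmap() elsewhere.
 */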

static void *
map(struct dir_info *d, void *hint, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	struct region_info *r, *big = NULL;
	u_int i, offset;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz))
		wrterror(d, "map round");

	if (!hint && psz > d->free_regions_size) {
		_MALLOC_LEAVE(d);
		p = MMAP(sz);
		_MALLOC_ENTER(d);
		if (p != MAP_FAILED)
			STATS_ADD(d->malloc_used, sz);
		/* zero fill not needed */
		return p;
	}
	offset = getrbyte(d);
	for (i = 0; i < mopts.malloc_cache; i++) {
		r = &d->free_regions[(i + offset) & (mopts.malloc_cache - 1)];
		if (r->p != NULL) {
			if (hint && r->p != hint)
				continue;
			if (r->size == psz) {
				p = r->p;
				r->p = NULL;
				r->size = 0;
				d->free_regions_size -= psz;
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_junk == 2 &&
				    mopts.malloc_freeunmap)
					memset(p, SOME_FREEJUNK, sz);
				return p;
			} else if (r->size > psz)
				big = r;
		}
	}
	if (big != NULL) {
		r = big;
		p = r->p;
		r->p = (char *)r->p + (psz << MALLOC_PAGESHIFT);
		if (mopts.malloc_freeunmap)
			mprotect(p, sz, PROT_READ | PROT_WRITE);
		r->size -= psz;
		d->free_regions_size -= psz;
		if (zero_fill)
			memset(p, 0, sz);
		else if (mopts.malloc_junk == 2 && mopts.malloc_freeunmap)
			memset(p, SOME_FREEJUNK, sz);
		return p;
	}
	if (hint)
		return MAP_FAILED;
	if (d->free_regions_size > mopts.malloc_cache)
		wrterror(d, "malloc cache");
	_MALLOC_LEAVE(d);
	p = MMAP(sz);
	_MALLOC_ENTER(d);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '>':
		mopts.malloc_cache <<= 1;
		if (mopts.malloc_cache > MALLOC_MAXCACHE)
			mopts.malloc_cache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.malloc_cache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
		mopts.malloc_stats = 1;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.malloc_junk > 0)
			mopts.malloc_junk--;
		break;
	case 'J':
		if (mopts.malloc_junk < 2)
			mopts.malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default: {
		dprintf(STDERR_FILENO, "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n");
		break;
	}
	}
}

static void
omalloc_init(void)
{
	char *p, *q, b[64];
	int i, j;

	/*
	 * Default options
	 */
	mopts.malloc_junk = 1;
	mopts.malloc_cache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j <= 0)
				continue;
			b[j] = '\0';
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = 0;
				break;
			case 's':
				for (q = "cfgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.malloc_cache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (mopts.malloc_stats && (atexit(malloc_exit) == -1)) {
		dprintf(STDERR_FILENO, "malloc() warning: atexit(2) failed."
		    " Will not be able to dump stats on exit\n");
	}
#endif /* MALLOC_STATS */

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
}

/*
 * Initialize a dir_info, which should have been cleared by caller
 */
static void
omalloc_poolinit(struct dir_info **dp)
{
	void *p;
	size_t d_avail, regioninfo_size;
	struct dir_info *d;
	int i, j;

	/*
	 * Allocate dir_info with a guard page on either side. Also
	 * randomise offset inside the page at which the dir_info
	 * lies (subject to alignment by 1 << MALLOC_MINSHIFT)
	 */
	if ((p = MMAP(DIR_INFO_RSZ + (MALLOC_PAGESIZE * 2))) == MAP_FAILED)
		wrterror(NULL, "malloc init mmap failed");
	mprotect(p, MALLOC_PAGESIZE, PROT_NONE);
	mprotect((char *)p + MALLOC_PAGESIZE + DIR_INFO_RSZ,
	    MALLOC_PAGESIZE, PROT_NONE);
	d_avail = (DIR_INFO_RSZ - sizeof(*d)) >> MALLOC_MINSHIFT;
	d = (struct dir_info *)((char *)p + MALLOC_PAGESIZE +
	    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));

	rbytes_init(d);
	d->regions_free = d->regions_total = MALLOC_INITIAL_REGIONS;
	regioninfo_size = d->regions_total * sizeof(struct region_info);
	d->r = MMAP(regioninfo_size);
	if (d->r == MAP_FAILED) {
		d->regions_total = 0;
		wrterror(NULL, "malloc init mmap failed");
	}
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	STATS_ADD(d->malloc_used, regioninfo_size);
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;

	*dp = d;
}
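
/*
 * Grow the region table by doubling it and re-inserting every live
 * entry under the new mask; the hash values stay the same, only one
 * more of their low bits is used.  insert() triggers this once the
 * table is more than 3/4 full (regions_free * 4 < regions_total).
 */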

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total * 2;
	newsize = newtotal * sizeof(struct region_info);
	mask = newtotal - 1;

	p = MMAP(newsize);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}
	/* keep pages containing meta info out of the cache */
	if (munmap(d->r, d->regions_total * sizeof(struct region_info)))
		wrterror(d, "munmap %p", (void *)d->r);
	else
		STATS_SUB(d->malloc_used,
		    d->regions_total * sizeof(struct region_info));
	d->regions_free = d->regions_free + d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, int bits)
{
	struct chunk_info *p;
	size_t size, count;

	if (bits == 0)
		count = MALLOC_PAGESIZE / MALLOC_MINSIZE;
	else
		count = MALLOC_PAGESIZE >> bits;

	size = howmany(count, MALLOC_BITS);
	size = sizeof(struct chunk_info) + (size - 1) * sizeof(u_short);
	if (mopts.chunk_canaries)
		size += count * sizeof(u_short);
	size = ALIGN(size);

	if (LIST_EMPTY(&d->chunk_info_list[bits])) {
		char *q;
		size_t i;

		q = MMAP(MALLOC_PAGESIZE);
		if (q == MAP_FAILED)
			return NULL;
		STATS_ADD(d->malloc_used, MALLOC_PAGESIZE);
		count = MALLOC_PAGESIZE / size;
		for (i = 0; i < count; i++, q += size)
			LIST_INSERT_HEAD(&d->chunk_info_list[bits],
			    (struct chunk_info *)q, entries);
	}
	p = LIST_FIRST(&d->chunk_info_list[bits]);
	LIST_REMOVE(p, entries);
	memset(p, 0, size);
	p->canary = d->canary1;
	return p;
}

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
#ifdef MALLOC_STATS
	d->r[index].f = f;
#endif
	d->regions_free--;
	return 0;
}

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	STATS_INC(d->finds);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
		STATS_INC(d->find_collisions);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}
	}
}

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, int bits, int listnum)
{
	struct chunk_info *bp;
	void *pp;
	int i, k;

	/* Allocate a new bucket */
	pp = map(d, NULL, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;

	bp = alloc_chunk_info(d, bits);
	if (bp == NULL) {
		unmap(d, pp, MALLOC_PAGESIZE, 0);
		return NULL;
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bits == 0) {
		bp->size = 0;
		bp->shift = 1;
		i = MALLOC_MINSIZE - 1;
		while (i >>= 1)
			bp->shift++;
		bp->total = bp->free = MALLOC_PAGESIZE >> bp->shift;
		bp->offset = 0xdead;
		bp->page = pp;

		k = mprotect(pp, MALLOC_PAGESIZE, PROT_NONE);
		if (k < 0) {
			unmap(d, pp, MALLOC_PAGESIZE, 0);
			LIST_INSERT_HEAD(&d->chunk_info_list[0], bp, entries);
			return NULL;
		}
	} else {
		bp->size = 1U << bits;
		bp->shift = bits;
		bp->total = bp->free = MALLOC_PAGESIZE >> bits;
		bp->offset = howmany(bp->total, MALLOC_BITS);
		bp->page = pp;
	}

	/* set all valid bits in the bitmap */
	k = bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (; (k - i) >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = (u_short)~0U;

	for (; i < k; i++)
		bp->bits[i / MALLOC_BITS] |= (u_short)1U << (i % MALLOC_BITS);

	LIST_INSERT_HEAD(&d->chunk_dir[bits][listnum], bp, entries);

	bits++;
	if ((uintptr_t)pp & bits)
		wrterror(d, "pp & bits %p", pp);

	insert(d, (void *)((uintptr_t)pp | bits), (uintptr_t)bp, NULL);
	return bp;
}

static int
find_chunksize(size_t size)
{
	int i, j;

	/*
	 * Don't bother with anything less than MALLOC_MINSIZE,
	 * unless we have a malloc(0) request.
	 */
	if (size != 0 && size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;

	/* Find the right bucket */
	if (size == 0)
		j = 0;
	else {
		j = MALLOC_MINSHIFT;
		i = (size - 1) >> (MALLOC_MINSHIFT - 1);
		while (i >>= 1)
			j++;
	}
	return j;
}
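
/*
 * Bucket j serves requests up to 1 << j bytes; for example a 24-byte
 * request gives i = 23 >> 3 = 2, which shifts down twice, so j ends up
 * as MALLOC_MINSHIFT + 1 = 5 and the allocation comes from the
 * 32-byte chunk lists.
 */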

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size, void *f)
{
	int i, j, listnum;
	size_t k;
	u_short u, *lp;
	struct chunk_info *bp;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	j = find_chunksize(size);

	listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
	/* If the list is empty, make a new page of chunks of that size */
	if ((bp = LIST_FIRST(&d->chunk_dir[j][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, j, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	i = d->chunk_start;
	if (bp->free > 1)
		i += getrbyte(d);
	if (i >= bp->total)
		i &= bp->total - 1;
	for (;;) {
		for (;;) {
			lp = &bp->bits[i / MALLOC_BITS];
			if (!*lp) {
				i += MALLOC_BITS;
				i &= ~(MALLOC_BITS - 1);
				if (i >= bp->total)
					i = 0;
			} else
				break;
		}
		k = i % MALLOC_BITS;
		u = 1 << k;
		if (*lp & u)
			break;
		if (++i >= bp->total)
			i = 0;
	}
	d->chunk_start += i + 1;
#ifdef MALLOC_STATS
	if (i == 0) {
		struct region_info *r = find(d, bp->page);
		r->f = f;
	}
#endif

	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += (lp - bp->bits) * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	k <<= bp->shift;

	if (bp->size > 0) {
		if (mopts.malloc_junk == 2)
			memset((char *)bp->page + k, SOME_JUNK, bp->size);
		else if (mopts.chunk_canaries)
			fill_canary((char *)bp->page + k, size, bp->size);
	}
	return ((char *)bp->page + k);
}
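
/*
 * Chunk canaries: when the 'C' option is active, the requested size is
 * recorded in the chunk_info bits[] array past the free bitmap (at
 * offset + chunk number), and the slack between the request and the
 * chunk size is filled with SOME_JUNK by fill_canary().
 * validate_canary() re-checks up to CHUNK_CHECK_LENGTH of those bytes
 * when the chunk is freed.
 */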

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, SOME_JUNK, check_sz);
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != SOME_JUNK) {
			wrterror(d, "chunk canary corrupted %p %#tx@%#zx%s",
			    ptr, p - ptr, sz, *p == SOME_FREEJUNK ?
			    " (double free?)" : "");
		}
		p++;
	}
}

static uint32_t
find_chunknum(struct dir_info *d, struct region_info *r, void *ptr, int check)
{
	struct chunk_info *info;
	uint32_t chunknum;

	info = (struct chunk_info *)r->size;
	if (info->canary != d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) >> info->shift;

	if ((uintptr_t)ptr & ((1U << (info->shift)) - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (info->bits[chunknum / MALLOC_BITS] &
	    (1U << (chunknum % MALLOC_BITS)))
		wrterror(d, "chunk is already free %p", ptr);
	if (check && info->size > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    info->size);
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	int listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, r, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		if (info->size != 0)
			mp = &d->chunk_dir[info->shift][listnum];
		else
			mp = &d->chunk_dir[0][listnum];

		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->size == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);

	delete(d, r);
	if (info->size != 0)
		mp = &d->chunk_info_list[info->shift];
	else
		mp = &d->chunk_info_list[0];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill, void *f)
{
	void *p;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, NULL, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
		if (insert(pool, p, sz, f)) {
			unmap(pool, p, psz, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (mopts.malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* fill zeros if needed and overwritten above */
			if (zero_fill && mopts.malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (mopts.malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz - mopts.malloc_guard,
					    SOME_JUNK, psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			} else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}
	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz, f);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only print the error
 * message once, to avoid making the problem potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	int i, max;
	struct dir_info *d;

	_MALLOC_LOCK(0);
	if (!from_rthreads && mopts.malloc_pool[0]) {
		_MALLOC_UNLOCK(0);
		return;
	}
	if (!mopts.malloc_canary)
		omalloc_init();

	max = from_rthreads ? _MALLOC_MUTEXES : 1;
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly),
		    PROT_READ | PROT_WRITE);
	for (i = 0; i < max; i++) {
		if (mopts.malloc_pool[i])
			continue;
		omalloc_poolinit(&d);
		d->mutex = i;
		mopts.malloc_pool[i] = d;
	}

	if (from_rthreads)
		mopts.malloc_mt = 1;
	else
		mopts.internal_funcs = 1;

	/*
	 * Options have been set and will never be reset.
	 * Prevent further tampering with them.
	 */
	if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0)
		mprotect(&malloc_readonly, sizeof(malloc_readonly), PROT_READ);
	_MALLOC_UNLOCK(0);
}
DEF_STRONG(_malloc_init);

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "malloc";

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}
	r = omalloc(d, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(malloc);*/
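
/*
 * Verify that a chunk parked in the delayed-free ring still carries the
 * SOME_FREEJUNK pattern it was filled with in ofree(); a mismatch in
 * the first CHUNK_CHECK_LENGTH bytes means somebody wrote through a
 * stale pointer, so it is reported as a use after free.
 */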

static void
validate_junk(struct dir_info *pool, void *p)
{
	struct region_info *r;
	size_t byte, sz;

	if (p == NULL)
		return;
	r = find(pool, p);
	if (r == NULL)
		wrterror(pool, "bogus pointer in validate_junk %p", p);
	REALSIZE(sz, r);
	if (sz > CHUNK_CHECK_LENGTH)
		sz = CHUNK_CHECK_LENGTH;
	for (byte = 0; byte < sz; byte++) {
		if (((unsigned char *)p)[byte] != SOME_FREEJUNK)
			wrterror(pool, "use after free %p", p);
	}
}
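
/*
 * Small chunks are not released immediately: ofree() junk-fills the
 * chunk, swaps it into a randomly chosen slot of the pool's
 * delayed_chunks ring, and actually frees whatever chunk that slot
 * held before (after validate_junk() has had a chance to catch writes
 * made through the stale pointer).  Large regions skip the ring and go
 * straight back to the page cache via unmap().
 */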

static void
ofree(struct dir_info *argpool, void *p, int clear, int check, size_t argsz)
{
	struct dir_info *pool;
	struct region_info *r;
	size_t sz;
	int i;

	pool = argpool;
	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, r, p, 0);

				if (info->bits[info->offset + chunknum] <
				    argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear);
		delete(pool, r);
	} else {
		/* Validate and optionally canary check */
		find_chunknum(pool, r, p, mopts.chunk_canaries);
		if (!clear) {
			void *tmp;
			int i;

			if (mopts.malloc_freecheck) {
				for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++)
					if (p == pool->delayed_chunks[i])
						wrterror(pool,
						    "double free %p", p);
			}
			if (mopts.malloc_junk && sz > 0)
				memset(p, SOME_FREEJUNK, sz);
			i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
			tmp = p;
			p = pool->delayed_chunks[i];
			if (tmp == p)
				wrterror(pool, "double free %p", tmp);
			pool->delayed_chunks[i] = tmp;
			if (mopts.malloc_junk)
				validate_junk(pool, p);
		} else if (sz > 0)
			memset(p, 0, sz);
		if (p != NULL) {
			r = find(pool, p);
			if (r == NULL)
				wrterror(pool,
				    "bogus pointer (double free?) %p", p);
			free_bytes(pool, r, p);
		}
	}

	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}
}

void
free(void *ptr)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	d = getpool();
	if (d == NULL)
		wrterror(d, "free() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "free";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 0, 0, 0);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
/*DEF_STRONG(free);*/

static void
freezero_p(void *ptr, size_t sz)
{
	explicit_bzero(ptr, sz);
	free(ptr);
}

void
freezero(void *ptr, size_t sz)
{
	struct dir_info *d;
	int saved_errno = errno;

	/* This is legal. */
	if (ptr == NULL)
		return;

	if (!mopts.internal_funcs) {
		freezero_p(ptr, sz);
		return;
	}

	d = getpool();
	if (d == NULL)
		wrterror(d, "freezero() called before allocation");
	_MALLOC_LOCK(d->mutex);
	d->func = "freezero";
	if (d->active++) {
		malloc_recurse(d);
		return;
	}
	ofree(d, ptr, 1, 1, sz);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = saved_errno;
}
DEF_WEAK(freezero);
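
/*
 * orealloc() tries three in-place strategies for page-sized regions
 * before falling back to allocate-copy-free: growing into pages mapped
 * directly after the region (the "cheap realloc", probed with mquery),
 * unmapping trailing pages when shrinking, and just adjusting the
 * recorded size (plus the MALLOC_MOVE offset) when the page count is
 * unchanged.  Chunk allocations stay put if the new size still fits
 * the chunk's bucket.
 */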
1479 |
|
|
|
1480 |
|
|
static void * |
1481 |
|
|
orealloc(struct dir_info *argpool, void *p, size_t newsz, void *f) |
1482 |
|
|
{ |
1483 |
|
|
struct dir_info *pool; |
1484 |
|
|
struct region_info *r; |
1485 |
|
|
struct chunk_info *info; |
1486 |
|
|
size_t oldsz, goldsz, gnewsz; |
1487 |
|
|
void *q, *ret; |
1488 |
|
|
int i; |
1489 |
|
|
uint32_t chunknum; |
1490 |
|
|
|
1491 |
|
|
pool = argpool; |
1492 |
|
|
|
1493 |
✓✓ |
12686 |
if (p == NULL) |
1494 |
|
255 |
return omalloc(pool, newsz, 0, f); |
1495 |
|
|
|
1496 |
|
6088 |
r = find(pool, p); |
1497 |
✗✓ |
6088 |
if (r == NULL) { |
1498 |
|
|
if (mopts.malloc_mt) { |
1499 |
|
|
for (i = 0; i < _MALLOC_MUTEXES; i++) { |
1500 |
|
|
if (i == argpool->mutex) |
1501 |
|
|
continue; |
1502 |
|
|
pool->active--; |
1503 |
|
|
_MALLOC_UNLOCK(pool->mutex); |
1504 |
|
|
pool = mopts.malloc_pool[i]; |
1505 |
|
|
_MALLOC_LOCK(pool->mutex); |
1506 |
|
|
pool->active++; |
1507 |
|
|
r = find(pool, p); |
1508 |
|
|
if (r != NULL) |
1509 |
|
|
break; |
1510 |
|
|
} |
1511 |
|
|
} |
1512 |
|
|
if (r == NULL) |
1513 |
|
|
wrterror(pool, "bogus pointer (double free?) %p", p); |
1514 |
|
|
} |
1515 |
✗✓ |
6088 |
if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) { |
1516 |
|
|
errno = ENOMEM; |
1517 |
|
|
ret = NULL; |
1518 |
|
|
goto done; |
1519 |
|
|
} |
1520 |
|
|
|
1521 |
✓✓✓✗
|
30428 |
REALSIZE(oldsz, r); |
1522 |
✗✓ |
6088 |
if (mopts.chunk_canaries && oldsz <= MALLOC_MAXCHUNK) { |
1523 |
|
|
chunknum = find_chunknum(pool, r, p, 0); |
1524 |
|
|
info = (struct chunk_info *)r->size; |
1525 |
|
|
} |
1526 |
|
|
|
1527 |
|
|
goldsz = oldsz; |
1528 |
✓✓ |
6088 |
if (oldsz > MALLOC_MAXCHUNK) { |
1529 |
✗✓ |
6 |
if (oldsz < mopts.malloc_guard) |
1530 |
|
|
wrterror(pool, "guard size"); |
1531 |
|
6 |
oldsz -= mopts.malloc_guard; |
1532 |
|
6 |
} |
1533 |
|
|
|
1534 |
|
|
gnewsz = newsz; |
1535 |
✓✓ |
6088 |
if (gnewsz > MALLOC_MAXCHUNK) |
1536 |
|
2854 |
gnewsz += mopts.malloc_guard; |
1537 |
|
|
|
1538 |
✓✓ |
12176 |
if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && |
1539 |
|
6088 |
!mopts.malloc_realloc) { |
1540 |
|
|
/* First case: from n pages sized allocation to m pages sized |
1541 |
|
|
allocation, m > n */ |
1542 |
|
6 |
size_t roldsz = PAGEROUND(goldsz); |
1543 |
|
6 |
size_t rnewsz = PAGEROUND(gnewsz); |
1544 |
|
|
|
1545 |
✓✗ |
6 |
if (rnewsz > roldsz) { |
1546 |
|
|
/* try to extend existing region */ |
1547 |
✓✗ |
6 |
if (!mopts.malloc_guard) { |
1548 |
|
6 |
void *hint = (char *)r->p + roldsz; |
1549 |
|
6 |
size_t needed = rnewsz - roldsz; |
1550 |
|
|
|
1551 |
|
|
STATS_INC(pool->cheap_realloc_tries); |
1552 |
|
6 |
q = map(pool, hint, needed, 0); |
1553 |
✓✗ |
6 |
if (q == hint) |
1554 |
|
|
goto gotit; |
1555 |
|
6 |
zapcacheregion(pool, hint, needed); |
1556 |
|
6 |
q = MQUERY(hint, needed); |
1557 |
✓✗ |
6 |
if (q == hint) |
1558 |
|
6 |
q = MMAPA(hint, needed); |
1559 |
|
|
else |
1560 |
|
|
q = MAP_FAILED; |
1561 |
✓✗ |
6 |
if (q == hint) { |
1562 |
|
|
gotit: |
1563 |
|
|
STATS_ADD(pool->malloc_used, needed); |
1564 |
✗✓ |
6 |
if (mopts.malloc_junk == 2) |
1565 |
|
|
memset(q, SOME_JUNK, needed); |
1566 |
|
6 |
r->size = gnewsz; |
1567 |
✓✗ |
6 |
if (r->p != p) { |
1568 |
|
|
/* old pointer is moved */ |
1569 |
|
6 |
memmove(r->p, p, oldsz); |
1570 |
|
6 |
p = r->p; |
1571 |
|
6 |
} |
1572 |
✗✓ |
6 |
if (mopts.chunk_canaries) |
1573 |
|
|
fill_canary(p, newsz, |
1574 |
|
|
PAGEROUND(newsz)); |
1575 |
|
|
STATS_SETF(r, f); |
1576 |
|
|
STATS_INC(pool->cheap_reallocs); |
1577 |
|
|
ret = p; |
1578 |
|
6 |
goto done; |
1579 |
|
|
} else if (q != MAP_FAILED) { |
1580 |
|
|
if (munmap(q, needed)) |
1581 |
|
|
wrterror(pool, "munmap %p", q); |
1582 |
|
|
} |
1583 |
|
|
} |
1584 |
|
|
} else if (rnewsz < roldsz) { |
1585 |
|
|
/* shrink number of pages */ |
1586 |
|
|
if (mopts.malloc_guard) { |
1587 |
|
|
if (mprotect((char *)r->p + roldsz - |
1588 |
|
|
mopts.malloc_guard, mopts.malloc_guard, |
1589 |
|
|
PROT_READ | PROT_WRITE)) |
1590 |
|
|
wrterror(pool, "mprotect"); |
1591 |
|
|
if (mprotect((char *)r->p + rnewsz - |
1592 |
|
|
mopts.malloc_guard, mopts.malloc_guard, |
1593 |
|
|
PROT_NONE)) |
1594 |
|
|
wrterror(pool, "mprotect"); |
1595 |
|
|
} |
1596 |
|
|
unmap(pool, (char *)r->p + rnewsz, roldsz - rnewsz, 0); |
1597 |
|
|
r->size = gnewsz; |
1598 |
|
|
if (MALLOC_MOVE_COND(gnewsz)) { |
1599 |
|
|
void *pp = MALLOC_MOVE(r->p, gnewsz); |
1600 |
|
|
memmove(pp, p, newsz); |
1601 |
|
|
p = pp; |
1602 |
|
|
} else if (mopts.chunk_canaries) |
1603 |
|
|
fill_canary(p, newsz, PAGEROUND(newsz)); |
1604 |
|
|
STATS_SETF(r, f); |
1605 |
|
|
ret = p; |
1606 |
|
|
goto done; |
1607 |
|
|
} else { |
1608 |
|
|
/* number of pages remains the same */ |
1609 |
|
|
void *pp = r->p; |
1610 |
|
|
|
1611 |
|
|
r->size = gnewsz; |
1612 |
|
|
if (MALLOC_MOVE_COND(gnewsz)) |
1613 |
|
|
pp = MALLOC_MOVE(r->p, gnewsz); |
1614 |
|
|
if (p != pp) { |
1615 |
|
|
memmove(pp, p, oldsz < newsz ? oldsz : newsz); |
1616 |
|
|
p = pp; |
1617 |
|
|
} |
1618 |
|
|
if (p == r->p) { |
1619 |
|
|
if (newsz > oldsz && mopts.malloc_junk == 2) |
1620 |
|
|
memset((char *)p + newsz, SOME_JUNK, |
1621 |
|
|
rnewsz - mopts.malloc_guard - |
1622 |
|
|
newsz); |
1623 |
|
|
if (mopts.chunk_canaries) |
1624 |
|
|
fill_canary(p, newsz, PAGEROUND(newsz)); |
1625 |
|
|
} |
1626 |
|
|
STATS_SETF(r, f); |
1627 |
|
|
ret = p; |
1628 |
|
|
goto done; |
1629 |
|
|
} |
1630 |
|
|
} |
1631 |
✓✓ |
18246 |
if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 && |
1632 |
|
12164 |
newsz <= MALLOC_MAXCHUNK && newsz > 0 && |
1633 |
✗✓ |
3234 |
1 << find_chunksize(newsz) == oldsz && !mopts.malloc_realloc) { |
1634 |
|
|
/* do not reallocate if new size fits good in existing chunk */ |
1635 |
|
|
if (mopts.malloc_junk == 2) |
1636 |
|
|
memset((char *)p + newsz, SOME_JUNK, oldsz - newsz); |
1637 |
|
|
if (mopts.chunk_canaries) { |
1638 |
|
|
info->bits[info->offset + chunknum] = newsz; |
1639 |
|
|
fill_canary(p, newsz, info->size); |
1640 |
|
|
} |
1641 |
|
|
STATS_SETF(r, f); |
1642 |
|
|
ret = p; |
1643 |
✓✗ |
6082 |
} else if (newsz != oldsz || mopts.malloc_realloc) { |
1644 |
|
|
/* create new allocation */ |
1645 |
|
6082 |
q = omalloc(pool, newsz, 0, f); |
1646 |
✗✓ |
6082 |
if (q == NULL) { |
1647 |
|
|
ret = NULL; |
1648 |
|
|
goto done; |
1649 |
|
|
} |
1650 |
✓✗ |
6082 |
if (newsz != 0 && oldsz != 0) |
1651 |
|
6082 |
memcpy(q, p, oldsz < newsz ? oldsz : newsz); |
1652 |
|
6082 |
ofree(pool, p, 0, 0, 0); |
1653 |
|
|
ret = q; |
1654 |
|
6082 |
} else { |
1655 |
|
|
/* oldsz == newsz */ |
1656 |
|
|
if (newsz != 0) |
1657 |
|
|
wrterror(pool, "realloc internal inconsistency"); |
1658 |
|
|
STATS_SETF(r, f); |
1659 |
|
|
ret = p; |
1660 |
|
|
} |
1661 |
|
|
done: |
1662 |
✗✓ |
6088 |
if (argpool != pool) { |
1663 |
|
|
pool->active--; |
1664 |
|
|
_MALLOC_UNLOCK(pool->mutex); |
1665 |
|
|
_MALLOC_LOCK(argpool->mutex); |
1666 |
|
|
argpool->active++; |
1667 |
|
|
} |
1668 |
|
6088 |
return ret; |
1669 |
|
6343 |
} |
1670 |
|
|
|
1671 |
|
|
void * |
1672 |
|
|
realloc(void *ptr, size_t size) |
1673 |
|
|
{ |
1674 |
|
|
struct dir_info *d; |
1675 |
|
|
void *r; |
1676 |
|
12686 |
int saved_errno = errno; |
1677 |
|
|
|
1678 |
|
6343 |
d = getpool(); |
1679 |
✗✓ |
6343 |
if (d == NULL) { |
1680 |
|
|
_malloc_init(0); |
1681 |
|
|
d = getpool(); |
1682 |
|
|
} |
1683 |
✗✓ |
6343 |
_MALLOC_LOCK(d->mutex); |
1684 |
|
6343 |
d->func = "realloc"; |
1685 |
✗✓ |
6343 |
if (d->active++) { |
1686 |
|
|
malloc_recurse(d); |
1687 |
|
|
return NULL; |
1688 |
|
|
} |
1689 |
|
6343 |
r = orealloc(d, ptr, size, CALLER); |
1690 |
|
|
|
1691 |
|
6343 |
d->active--; |
1692 |
✗✓ |
6343 |
_MALLOC_UNLOCK(d->mutex); |
1693 |
✗✓ |
6343 |
if (r == NULL && mopts.malloc_xmalloc) |
1694 |
|
|
wrterror(d, "out of memory"); |
1695 |
✓✗ |
6343 |
if (r != NULL) |
1696 |
|
6343 |
errno = saved_errno; |
1697 |
|
6343 |
return r; |
1698 |
|
6343 |
} |
1699 |
|
|
/*DEF_STRONG(realloc);*/ |
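
/*
 * Editor's sketch (not part of the library): the "cheap realloc" path
 * in orealloc() grows a region in place by probing the pages just past
 * it with OpenBSD's mquery(2) and, only if they are free, mapping them
 * at that fixed hint.  Standalone, the idea looks roughly like this;
 * region, oldlen and grow are assumptions for the example.
 */
#if 0
	char *hint = (char *)region + oldlen;	/* first byte past region */
	void *q;

	q = mquery(hint, grow, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	if (q == hint)
		q = mmap(hint, grow, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
	if (q == hint) {
		/* region now spans oldlen + grow bytes; nothing copied */
	}
#endif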

/*
 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
 */
#define MUL_NO_OVERFLOW	(1UL << (sizeof(size_t) * 4))
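
/*
 * Worked example (editor's note): on a 64-bit system MUL_NO_OVERFLOW is
 * 1UL << 32.  If both factors are below 2^32, their product is below
 * 2^64 and cannot wrap a size_t, so no division is needed.  Only when
 * at least one factor reaches 2^32 does the slower check run:
 * overflow iff nmemb > 0 && SIZE_MAX / nmemb < size.
 */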

void *
calloc(size_t nmemb, size_t size)
{
	struct dir_info *d;
	void *r;
	int saved_errno = errno;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "calloc";
	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    nmemb > 0 && SIZE_MAX / nmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	size *= nmemb;
	r = omalloc(d, size, 1, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
/*DEF_STRONG(calloc);*/
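
/*
 * Illustrative usage (editor's sketch): prefer calloc(n, sz) over
 * malloc(n * sz), since the multiplication above is overflow-checked.
 * struct entry and nentries are assumptions for the example.
 */
#if 0
	struct entry *tab;

	if ((tab = calloc(nentries, sizeof(*tab))) == NULL)
		err(1, NULL);	/* ENOMEM also signals nentries * sz wrap */
#endif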

static void *
orecallocarray(struct dir_info *argpool, void *p, size_t oldsize,
    size_t newsize, void *f)
{
	struct dir_info *pool;
	struct region_info *r;
	void *newptr;
	size_t sz;
	int i;

	pool = argpool;

	if (p == NULL)
		return omalloc(pool, newsize, 1, f);

	if (oldsize == newsize)
		return p;

	r = find(pool, p);
	if (r == NULL) {
		if (mopts.malloc_mt) {
			for (i = 0; i < _MALLOC_MUTEXES; i++) {
				if (i == argpool->mutex)
					continue;
				pool->active--;
				_MALLOC_UNLOCK(pool->mutex);
				pool = mopts.malloc_pool[i];
				_MALLOC_LOCK(pool->mutex);
				pool->active++;
				r = find(pool, p);
				if (r != NULL)
					break;
			}
		}
		if (r == NULL)
			wrterror(pool, "bogus pointer (double free?) %p", p);
	}

	REALSIZE(sz, r);
	if (sz <= MALLOC_MAXCHUNK) {
		if (mopts.chunk_canaries && sz > 0) {
			struct chunk_info *info = (struct chunk_info *)r->size;
			uint32_t chunknum = find_chunknum(pool, r, p, 0);

			if (info->bits[info->offset + chunknum] != oldsize)
				wrterror(pool, "recorded old size %hu != %zu",
				    info->bits[info->offset + chunknum],
				    oldsize);
		}
	} else if (oldsize != sz - mopts.malloc_guard)
		wrterror(pool, "recorded old size %zu != %zu",
		    sz - mopts.malloc_guard, oldsize);

	newptr = omalloc(pool, newsize, 0, f);
	if (newptr == NULL)
		goto done;

	if (newsize > oldsize) {
		memcpy(newptr, p, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, p, newsize);

	ofree(pool, p, 1, 0, 0);

done:
	if (argpool != pool) {
		pool->active--;
		_MALLOC_UNLOCK(pool->mutex);
		_MALLOC_LOCK(argpool->mutex);
		argpool->active++;
	}

	return newptr;
}

static void *
recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	size_t oldsize, newsize;
	void *newptr;

	if (ptr == NULL)
		return calloc(newnmemb, size);

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
		errno = EINVAL;
		return NULL;
	}
	oldsize = oldnmemb * size;

	/*
	 * Don't bother reallocating if we're only shrinking a bit;
	 * as a consequence, a long series of small shrink steps
	 * never shrinks the allocation at all.
	 */
	if (newsize <= oldsize) {
		size_t d = oldsize - newsize;

		if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
			memset((char *)ptr + newsize, 0, d);
			return ptr;
		}
	}

	newptr = malloc(newsize);
	if (newptr == NULL)
		return NULL;

	if (newsize > oldsize) {
		memcpy(newptr, ptr, oldsize);
		memset((char *)newptr + oldsize, 0, newsize - oldsize);
	} else
		memcpy(newptr, ptr, newsize);

	explicit_bzero(ptr, oldsize);
	free(ptr);

	return newptr;
}
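
/*
 * Worked example (editor's note): with a 4096-byte page, shrinking a
 * 1000-byte allocation to 900 bytes gives d = 100; since 100 < 500
 * (half the old size) and 100 < MALLOC_PAGESIZE, the tail is zeroed
 * and the same pointer is returned.  Shrinking 1000 -> 400 gives
 * d = 600 >= 500, so a fresh, smaller allocation is made instead.
 */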

void *
recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
{
	struct dir_info *d;
	size_t oldsize = 0, newsize;
	void *r;
	int saved_errno = errno;

	if (!mopts.internal_funcs)
		return recallocarray_p(ptr, oldnmemb, newnmemb, size);

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}

	_MALLOC_LOCK(d->mutex);
	d->func = "recallocarray";

	if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
	    newnmemb > 0 && SIZE_MAX / newnmemb < size) {
		_MALLOC_UNLOCK(d->mutex);
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		errno = ENOMEM;
		return NULL;
	}
	newsize = newnmemb * size;

	if (ptr != NULL) {
		if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
		    oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
			_MALLOC_UNLOCK(d->mutex);
			errno = EINVAL;
			return NULL;
		}
		oldsize = oldnmemb * size;
	}

	if (d->active++) {
		malloc_recurse(d);
		return NULL;
	}

	r = orecallocarray(d, ptr, oldsize, newsize, CALLER);

	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL && mopts.malloc_xmalloc)
		wrterror(d, "out of memory");
	if (r != NULL)
		errno = saved_errno;
	return r;
}
DEF_WEAK(recallocarray);
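
/*
 * Illustrative usage (editor's sketch): recallocarray() is the
 * overflow-checked, zeroing grow/shrink primitive.  The caller must
 * pass the true old element count so the vacated tail can be wiped.
 * struct item and n are assumptions for the example.
 */
#if 0
	struct item *tab = NULL;
	size_t n = 0;

	tab = recallocarray(tab, n, n + 8, sizeof(*tab));
	if (tab == NULL)
		err(1, NULL);
	n += 8;			/* the new cells arrive zeroed */
#endif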

static void *
mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
{
	char *p, *q;

	if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
		wrterror(d, "mapalign bad alignment");
	if (sz != PAGEROUND(sz))
		wrterror(d, "mapalign round");

	/* Allocate sz + alignment bytes of memory, which must include a
	 * subrange of sz bytes that is properly aligned.  Unmap the
	 * other bytes, and then return that subrange.
	 */

	/* We need sz + alignment to fit into a size_t. */
	if (alignment > SIZE_MAX - sz)
		return MAP_FAILED;

	p = map(d, NULL, sz + alignment, zero_fill);
	if (p == MAP_FAILED)
		return MAP_FAILED;
	q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
	if (q != p) {
		if (munmap(p, q - p))
			wrterror(d, "munmap %p", p);
	}
	if (munmap(q + sz, alignment - (q - p)))
		wrterror(d, "munmap %p", q + sz);
	STATS_SUB(d->malloc_used, alignment);

	return q;
}
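
/*
 * Worked example (editor's note): for a 16 KB-aligned, 16 KB request on
 * a 4 KB-page system, 32 KB is mapped.  If the kernel returns
 * p = 0x1000, then q = (p + 0x3fff) & ~0x3fff = 0x4000; the 0x3000
 * bytes before q and the 0x1000 bytes from q + sz onward are unmapped,
 * leaving exactly the aligned range [0x4000, 0x8000).
 */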

static void *
omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill,
    void *f)
{
	size_t psz;
	void *p;

	/* If between half a page and a page, avoid MALLOC_MOVE. */
	if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
		sz = MALLOC_PAGESIZE;
	if (alignment <= MALLOC_PAGESIZE) {
		/*
		 * max(size, alignment) is enough to assure the requested
		 * alignment, since the allocator always allocates
		 * power-of-two blocks.
		 */
		if (sz < alignment)
			sz = alignment;
		return omalloc(pool, sz, zero_fill, f);
	}

	if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
		errno = ENOMEM;
		return NULL;
	}

	sz += mopts.malloc_guard;
	psz = PAGEROUND(sz);

	p = mapalign(pool, alignment, psz, zero_fill);
	if (p == MAP_FAILED) {
		errno = ENOMEM;
		return NULL;
	}

	if (insert(pool, p, sz, f)) {
		unmap(pool, p, psz, 0);
		errno = ENOMEM;
		return NULL;
	}

	if (mopts.malloc_guard) {
		if (mprotect((char *)p + psz - mopts.malloc_guard,
		    mopts.malloc_guard, PROT_NONE))
			wrterror(pool, "mprotect");
		STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
	}

	if (mopts.malloc_junk == 2) {
		if (zero_fill)
			memset((char *)p + sz - mopts.malloc_guard,
			    SOME_JUNK, psz - sz);
		else
			memset(p, SOME_JUNK, psz - mopts.malloc_guard);
	} else if (mopts.chunk_canaries)
		fill_canary(p, sz - mopts.malloc_guard,
		    psz - mopts.malloc_guard);

	return p;
}
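
/*
 * Editor's note on the small-alignment shortcut above: chunk pages are
 * page-aligned and carved into equal power-of-two slots, so a chunk of
 * size 2^k always starts on a 2^k boundary.  Rounding the request up to
 * max(size, alignment) therefore satisfies any alignment up to a page
 * without needing mapalign().
 */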

int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	struct dir_info *d;
	int res, saved_errno = errno;
	void *r;

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
		return EINVAL;

	d = getpool();
	if (d == NULL) {
		_malloc_init(0);
		d = getpool();
	}
	_MALLOC_LOCK(d->mutex);
	d->func = "posix_memalign";
	if (d->active++) {
		malloc_recurse(d);
		goto err;
	}
	r = omemalign(d, alignment, size, 0, CALLER);
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	if (r == NULL) {
		if (mopts.malloc_xmalloc)
			wrterror(d, "out of memory");
		goto err;
	}
	errno = saved_errno;
	*memptr = r;
	return 0;

err:
	res = errno;
	errno = saved_errno;
	return res;
}
/*DEF_STRONG(posix_memalign);*/
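
/*
 * Illustrative usage (editor's sketch): unlike malloc(),
 * posix_memalign() reports failure through its return value and leaves
 * errno untouched, which the err-label dance above implements.
 */
#if 0
	void *buf;
	int error;

	error = posix_memalign(&buf, 4096, 65536);	/* page-aligned 64 KB */
	if (error != 0)
		errx(1, "posix_memalign: %s", strerror(error));
	/* ... */
	free(buf);
#endif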

#ifdef MALLOC_STATS

struct malloc_leak {
	void *f;
	size_t total_size;
	int count;
};

struct leaknode {
	RBT_ENTRY(leaknode) entry;
	struct malloc_leak d;
};

static inline int
leakcmp(const struct leaknode *e1, const struct leaknode *e2)
{
	return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
}

static RBT_HEAD(leaktree, leaknode) leakhead;
RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
RBT_GENERATE(leaktree, leaknode, entry, leakcmp);

static void
putleakinfo(void *f, size_t sz, int cnt)
{
	struct leaknode key, *p;
	static struct leaknode *page;
	static int used;

	if (cnt == 0 || page == MAP_FAILED)
		return;

	key.d.f = f;
	p = RBT_FIND(leaktree, &leakhead, &key);
	if (p == NULL) {
		if (page == NULL ||
		    used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
			page = MMAP(MALLOC_PAGESIZE);
			if (page == MAP_FAILED)
				return;
			used = 0;
		}
		p = &page[used++];
		p->d.f = f;
		p->d.total_size = sz * cnt;
		p->d.count = cnt;
		RBT_INSERT(leaktree, &leakhead, p);
	} else {
		p->d.total_size += sz * cnt;
		p->d.count += cnt;
	}
}

static struct malloc_leak *malloc_leaks;

static void
dump_leaks(int fd)
{
	struct leaknode *p;
	int i = 0;

	dprintf(fd, "Leak report\n");
	dprintf(fd, "                 f     sum      #    avg\n");
	/* XXX only one page of summary */
	if (malloc_leaks == NULL)
		malloc_leaks = MMAP(MALLOC_PAGESIZE);
	if (malloc_leaks != MAP_FAILED)
		memset(malloc_leaks, 0, MALLOC_PAGESIZE);
	RBT_FOREACH(p, leaktree, &leakhead) {
		dprintf(fd, "%18p %7zu %6u %6zu\n", p->d.f,
		    p->d.total_size, p->d.count, p->d.total_size / p->d.count);
		if (malloc_leaks == MAP_FAILED ||
		    i >= MALLOC_PAGESIZE / sizeof(struct malloc_leak))
			continue;
		malloc_leaks[i].f = p->d.f;
		malloc_leaks[i].total_size = p->d.total_size;
		malloc_leaks[i].count = p->d.count;
		i++;
	}
}

static void
dump_chunk(int fd, struct chunk_info *p, void *f, int fromfreelist)
{
	while (p != NULL) {
		dprintf(fd, "chunk %18p %18p %4d %d/%d\n",
		    p->page, ((p->bits[0] & 1) ? NULL : f),
		    p->size, p->free, p->total);
		if (!fromfreelist) {
			if (p->bits[0] & 1)
				putleakinfo(NULL, p->size, p->total - p->free);
			else {
				putleakinfo(f, p->size, 1);
				putleakinfo(NULL, p->size,
				    p->total - p->free - 1);
			}
			break;
		}
		p = LIST_NEXT(p, entries);
		if (p != NULL)
			dprintf(fd, "        ");
	}
}

static void
dump_free_chunk_info(int fd, struct dir_info *d)
{
	int i, j, count;
	struct chunk_info *p;

	dprintf(fd, "Free chunk structs:\n");
	for (i = 0; i <= MALLOC_MAXSHIFT; i++) {
		count = 0;
		LIST_FOREACH(p, &d->chunk_info_list[i], entries)
			count++;
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
			p = LIST_FIRST(&d->chunk_dir[i][j]);
			if (p == NULL && count == 0)
				continue;
			dprintf(fd, "%2d) %3d ", i, count);
			if (p != NULL)
				dump_chunk(fd, p, NULL, 1);
			else
				dprintf(fd, "\n");
		}
	}
}

static void
dump_free_page_info(int fd, struct dir_info *d)
{
	int i;

	dprintf(fd, "Free pages cached: %zu\n", d->free_regions_size);
	for (i = 0; i < mopts.malloc_cache; i++) {
		if (d->free_regions[i].p != NULL) {
			dprintf(fd, "%2d) ", i);
			dprintf(fd, "free at %p: %zu\n",
			    d->free_regions[i].p, d->free_regions[i].size);
		}
	}
}

static void
malloc_dump1(int fd, int poolno, struct dir_info *d)
{
	size_t i, realsize;

	dprintf(fd, "Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
	if (d == NULL)
		return;
	dprintf(fd, "Region slots free %zu/%zu\n",
	    d->regions_free, d->regions_total);
	dprintf(fd, "Finds %zu/%zu\n", d->finds, d->find_collisions);
	dprintf(fd, "Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
	dprintf(fd, "Deletes %zu/%zu\n", d->deletes, d->delete_moves);
	dprintf(fd, "Cheap reallocs %zu/%zu\n",
	    d->cheap_reallocs, d->cheap_realloc_tries);
	dprintf(fd, "In use %zu\n", d->malloc_used);
	dprintf(fd, "Guarded %zu\n", d->malloc_guarded);
	dump_free_chunk_info(fd, d);
	dump_free_page_info(fd, d);
	dprintf(fd,
	    "slot)  hash d  type               page                  f size [free/n]\n");
	for (i = 0; i < d->regions_total; i++) {
		if (d->r[i].p != NULL) {
			size_t h = hash(d->r[i].p) &
			    (d->regions_total - 1);
			dprintf(fd, "%4zx) #%4zx %zd ",
			    i, h, h - i);
			REALSIZE(realsize, &d->r[i]);
			if (realsize > MALLOC_MAXCHUNK) {
				putleakinfo(d->r[i].f, realsize, 1);
				dprintf(fd,
				    "pages %18p %18p %zu\n", d->r[i].p,
				    d->r[i].f, realsize);
			} else
				dump_chunk(fd,
				    (struct chunk_info *)d->r[i].size,
				    d->r[i].f, 0);
		}
	}
	dump_leaks(fd);
	dprintf(fd, "\n");
}

void
malloc_dump(int fd, int poolno, struct dir_info *pool)
{
	int i;
	void *p;
	struct region_info *r;
	int saved_errno = errno;

	if (pool == NULL)
		return;
	for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
		p = pool->delayed_chunks[i];
		if (p == NULL)
			continue;
		r = find(pool, p);
		if (r == NULL)
			wrterror(pool, "bogus pointer in malloc_dump %p", p);
		free_bytes(pool, r, p);
		pool->delayed_chunks[i] = NULL;
	}
	/* XXX leak when run multiple times */
	RBT_INIT(leaktree, &leakhead);
	malloc_dump1(fd, poolno, pool);
	errno = saved_errno;
}
DEF_WEAK(malloc_dump);

void
malloc_gdump(int fd)
{
	int i;
	int saved_errno = errno;

	for (i = 0; i < _MALLOC_MUTEXES; i++)
		malloc_dump(fd, i, mopts.malloc_pool[i]);

	errno = saved_errno;
}
DEF_WEAK(malloc_gdump);
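
/*
 * Illustrative usage (editor's sketch): when the file is built with
 * MALLOC_STATS defined, a process can dump every pool, including the
 * leak report, to a descriptor of its choosing:
 */
#if 0
	malloc_gdump(STDERR_FILENO);
#endif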

static void
malloc_exit(void)
{
	int save_errno = errno, fd, i;

	fd = open("malloc.out", O_RDWR|O_APPEND);
	if (fd != -1) {
		dprintf(fd, "******** Start dump %s *******\n", __progname);
		dprintf(fd,
		    "MT=%d I=%d F=%d U=%d J=%d R=%d X=%d C=%d cache=%u G=%zu\n",
		    mopts.malloc_mt, mopts.internal_funcs,
		    mopts.malloc_freecheck,
		    mopts.malloc_freeunmap, mopts.malloc_junk,
		    mopts.malloc_realloc, mopts.malloc_xmalloc,
		    mopts.chunk_canaries, mopts.malloc_cache,
		    mopts.malloc_guard);

		for (i = 0; i < _MALLOC_MUTEXES; i++)
			malloc_dump(fd, i, mopts.malloc_pool[i]);
		dprintf(fd, "******** End dump %s *******\n", __progname);
		close(fd);
	} else
		dprintf(STDERR_FILENO,
		    "malloc() warning: Couldn't dump stats\n");
	errno = save_errno;
}

#endif /* MALLOC_STATS */