/*	$OpenBSD: malloc.c,v 1.300 2025/10/23 18:49:46 miod Exp $	*/
/*
 * Copyright (c) 2008, 2010, 2011, 2016, 2023 Otto Moerbeek <[email protected]>
 * Copyright (c) 2012 Matthew Dempsky <[email protected]>
 * Copyright (c) 2008 Damien Miller <[email protected]>
 * Copyright (c) 2000 Poul-Henning Kamp <[email protected]>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * If we meet some day, and you think this stuff is worth it, you
 * can buy me a beer in return. Poul-Henning Kamp
 */

#ifndef MALLOC_SMALL
#define MALLOC_STATS
#endif

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef MALLOC_STATS
#include <sys/tree.h>
#include <sys/ktrace.h>
#include <dlfcn.h>
#endif

#include "thread_private.h"
#include <tib.h>

#define MALLOC_PAGESHIFT	_MAX_PAGE_SHIFT

#define MALLOC_MINSHIFT		4
#define MALLOC_MAXSHIFT		(MALLOC_PAGESHIFT - 1)
#define MALLOC_PAGESIZE		(1UL << MALLOC_PAGESHIFT)
#define MALLOC_MINSIZE		(1UL << MALLOC_MINSHIFT)
#define MALLOC_PAGEMASK		(MALLOC_PAGESIZE - 1)
#define MASK_POINTER(p)		((void *)(((uintptr_t)(p)) & ~MALLOC_PAGEMASK))

#define MALLOC_MAXCHUNK		(1 << MALLOC_MAXSHIFT)
#define MALLOC_MAXCACHE		256
#define MALLOC_DELAYED_CHUNK_MASK	15
#ifdef MALLOC_STATS
#define MALLOC_INITIAL_REGIONS	512
#else
#define MALLOC_INITIAL_REGIONS	(MALLOC_PAGESIZE / sizeof(struct region_info))
#endif
#define MALLOC_DEFAULT_CACHE	64
#define MALLOC_CHUNK_LISTS	4
#define CHUNK_CHECK_LENGTH	32

#define B2SIZE(b)		((b) * MALLOC_MINSIZE)
#define B2ALLOC(b)		((b) == 0 ? MALLOC_MINSIZE : \
				    (b) * MALLOC_MINSIZE)
#define BUCKETS			(MALLOC_MAXCHUNK / MALLOC_MINSIZE)
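
/*
 * Worked example of the bucket arithmetic, assuming a 4 KiB page: with
 * MALLOC_MINSIZE 16, bucket b holds chunks of b * 16 bytes, so bucket 3
 * holds 48-byte chunks and B2SIZE(3) == 48.  MALLOC_MAXCHUNK is then
 * 2048 and BUCKETS == 2048 / 16 == 128.  Bucket 0 is the malloc(0)
 * bucket: B2ALLOC(0) still reserves MALLOC_MINSIZE bytes so every
 * zero-sized allocation has a distinct address.
 */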

/*
 * We move allocations between half a page and a whole page towards the end,
 * subject to alignment constraints. This is the extra headroom we allow.
 * Set to zero to be the most strict.
 */
#define MALLOC_LEEWAY		0
#define MALLOC_MOVE_COND(sz)	((sz) - mopts.malloc_guard <		\
				    MALLOC_PAGESIZE - MALLOC_LEEWAY)
#define MALLOC_MOVE(p, sz)	(((char *)(p)) +			\
				    ((MALLOC_PAGESIZE - MALLOC_LEEWAY -	\
				    ((sz) - mopts.malloc_guard)) &	\
				    ~(MALLOC_MINSIZE - 1)))
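
/*
 * Example, assuming a 4 KiB page, no guard and MALLOC_LEEWAY 0: for
 * sz == 100, MALLOC_MOVE_COND holds and MALLOC_MOVE(p, 100) yields
 * p + ((4096 - 100) & ~15) == p + 3984, so the object ends 12 bytes
 * short of the page end because the shift is aligned down to a
 * multiple of MALLOC_MINSIZE.
 */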

#define PAGEROUND(x)	(((x) + (MALLOC_PAGEMASK)) & ~MALLOC_PAGEMASK)

/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled. Use SOME_JUNK right after alloc,
 * and SOME_FREEJUNK right before free.
 */
#define SOME_JUNK		0xdb	/* deadbeef */
#define SOME_FREEJUNK		0xdf	/* dead, free */
#define SOME_FREEJUNK_ULL	0xdfdfdfdfdfdfdfdfULL

#define MMAP(sz,f)	mmap(NULL, (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPNONE(sz,f)	mmap(NULL, (sz), PROT_NONE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

#define MMAPA(a,sz,f)	mmap((a), (sz), PROT_READ | PROT_WRITE, \
    MAP_ANON | MAP_PRIVATE | (f), -1, 0)

struct region_info {
	void *p;		/* page; low bits used to mark chunks */
	uintptr_t size;		/* size for pages, or chunk_info pointer */
#ifdef MALLOC_STATS
	void **f;		/* where allocated from */
#endif
};

LIST_HEAD(chunk_head, chunk_info);

/*
 * Two caches, one for "small" regions, one for "big".
 * Small cache is an array per size, big cache is one array with different
 * sized regions
 */
#define MAX_SMALLCACHEABLE_SIZE	32
#define MAX_BIGCACHEABLE_SIZE	512
/* If the total # of pages is larger than this, evict before inserting */
#define BIGCACHE_FILL(sz)	(MAX_BIGCACHEABLE_SIZE * (sz) / 4)

struct smallcache {
	void **pages;
	ushort length;
	ushort max;
};

struct bigcache {
	void *page;
	size_t psize;
};

#ifdef MALLOC_STATS
#define NUM_FRAMES		4
struct btnode {
	RBT_ENTRY(btnode) entry;
	void *caller[NUM_FRAMES];
};
RBT_HEAD(btshead, btnode);
RBT_PROTOTYPE(btshead, btnode, entry, btcmp);
#endif /* MALLOC_STATS */

struct dir_info {
	u_int32_t canary1;
	int active;			/* status of malloc */
	struct region_info *r;		/* region slots */
	size_t regions_total;		/* number of region slots */
	size_t regions_free;		/* number of free slots */
	size_t rbytesused;		/* random bytes used */
	const char *func;		/* current function */
	int malloc_junk;		/* junk fill? */
	int mmap_flag;			/* extra flag for mmap */
	int mutex;
	int malloc_mt;			/* multi-threaded mode? */
					/* lists of free chunk info structs */
	struct chunk_head chunk_info_list[BUCKETS + 1];
					/* lists of chunks with free slots */
	struct chunk_head chunk_dir[BUCKETS + 1][MALLOC_CHUNK_LISTS];
					/* delayed free chunk slots */
	void *delayed_chunks[MALLOC_DELAYED_CHUNK_MASK + 1];
	u_char rbytes[32];		/* random bytes */
					/* free pages cache */
	struct smallcache smallcache[MAX_SMALLCACHEABLE_SIZE];
	size_t bigcache_used;
	size_t bigcache_size;
	struct bigcache *bigcache;
	void *chunk_pages;
	size_t chunk_pages_used;
#ifdef MALLOC_STATS
	void *caller;
	size_t inserts;
	size_t insert_collisions;
	size_t deletes;
	size_t delete_moves;
	size_t cheap_realloc_tries;
	size_t cheap_reallocs;
	size_t malloc_used;		/* bytes allocated */
	size_t malloc_guarded;		/* bytes used for guards */
	struct btshead btraces;		/* backtraces seen */
	struct btnode *btnodes;		/* store of backtrace nodes */
	size_t btnodesused;
#define STATS_ADD(x,y)	((x) += (y))
#define STATS_SUB(x,y)	((x) -= (y))
#define STATS_INC(x)	((x)++)
#define STATS_ZERO(x)	((x) = 0)
#define STATS_SETF(x,y)	((x)->f = (y))
#define STATS_SETFN(x,k,y)	((x)->f[k] = (y))
#define SET_CALLER(x,y)	if (DO_STATS) ((x)->caller = (y))
#else
#define STATS_ADD(x,y)	/* nothing */
#define STATS_SUB(x,y)	/* nothing */
#define STATS_INC(x)	/* nothing */
#define STATS_ZERO(x)	/* nothing */
#define STATS_SETF(x,y)	/* nothing */
#define STATS_SETFN(x,k,y)	/* nothing */
#define SET_CALLER(x,y)	/* nothing */
#endif /* MALLOC_STATS */
	u_int32_t canary2;
};

static void unmap(struct dir_info *d, void *p, size_t sz, size_t clear);

/*
 * This structure describes a page worth of chunks.
 *
 * How many bits per u_short in the bitmap
 */
#define MALLOC_BITS		(NBBY * sizeof(u_short))
struct chunk_info {
	LIST_ENTRY(chunk_info) entries;
	void *page;			/* pointer to the page */
	/* number of shorts should add up to 8, check alloc_chunk_info() */
	u_short canary;
	u_short bucket;
	u_short free;			/* how many free chunks */
	u_short total;			/* how many chunks */
	u_short offset;			/* requested size table offset */
#define CHUNK_INFO_TAIL			3
	u_short bits[CHUNK_INFO_TAIL];	/* which chunks are free */
};

#define CHUNK_FREE(i, n) ((i)->bits[(n) / MALLOC_BITS] & \
    (1U << ((n) % MALLOC_BITS)))

struct malloc_readonly {
					/* Main bookkeeping information */
	struct dir_info *malloc_pool[_MALLOC_MUTEXES];
	u_int	malloc_mutexes;		/* how much in actual use? */
	int	malloc_freecheck;	/* Extensive double free check */
	int	malloc_freeunmap;	/* mprotect free pages PROT_NONE? */
	int	def_malloc_junk;	/* junk fill? */
	int	malloc_realloc;		/* always realloc? */
	int	malloc_xmalloc;		/* xmalloc behaviour? */
	u_int	chunk_canaries;		/* use canaries after chunks? */
	int	internal_funcs;		/* use better recallocarray/freezero? */
	u_int	def_maxcache;		/* free pages we cache */
	u_int	junk_loc;		/* variation in location of junk */
	size_t	malloc_guard;		/* use guard pages after allocations? */
#ifdef MALLOC_STATS
	int	malloc_stats;		/* save callers, dump leak report */
	int	malloc_verbose;		/* dump verbose statistics at end */
#define	DO_STATS	mopts.malloc_stats
#else
#define	DO_STATS	0
#endif
	u_int32_t malloc_canary;	/* Matched against ones in pool */
};


/* This object is mapped PROT_READ after initialisation to prevent tampering */
static union {
	struct malloc_readonly mopts;
	u_char _pad[MALLOC_PAGESIZE];
} malloc_readonly __attribute__((aligned(MALLOC_PAGESIZE)))
		__attribute__((section(".openbsd.mutable")));
#define mopts	malloc_readonly.mopts

/* compile-time options */
const char *const malloc_options __attribute__((weak));

static __dead void wrterror(struct dir_info *d, char *msg, ...)
    __attribute__((__format__ (printf, 2, 3)));

#ifdef MALLOC_STATS
void malloc_dump(void);
PROTO_NORMAL(malloc_dump);
static void malloc_exit(void);
static void print_chunk_details(struct dir_info *, void *, size_t, size_t);
static void* store_caller(struct dir_info *, struct btnode *);

/* below are the arches for which deeper caller info has been tested */
#if defined(__aarch64__) || \
	defined(__amd64__) || \
	defined(__arm__) || \
	defined(__i386__) || \
	defined(__powerpc__)
__attribute__((always_inline))
static inline void*
caller(struct dir_info *d)
{
	struct btnode p;
	int level = DO_STATS;

	if (level == 0)
		return NULL;

	memset(&p.caller, 0, sizeof(p.caller));
	if (level >= 1)
		p.caller[0] = __builtin_extract_return_addr(
		    __builtin_return_address(0));
	if (p.caller[0] != NULL && level >= 2)
		p.caller[1] = __builtin_extract_return_addr(
		    __builtin_return_address(1));
	if (p.caller[1] != NULL && level >= 3)
		p.caller[2] = __builtin_extract_return_addr(
		    __builtin_return_address(2));
	if (p.caller[2] != NULL && level >= 4)
		p.caller[3] = __builtin_extract_return_addr(
		    __builtin_return_address(3));
	return store_caller(d, &p);
}
#else
__attribute__((always_inline))
static inline void* caller(struct dir_info *d)
{
	struct btnode p;

	if (DO_STATS == 0)
		return NULL;
	memset(&p.caller, 0, sizeof(p.caller));
	p.caller[0] = __builtin_extract_return_addr(__builtin_return_address(0));
	return store_caller(d, &p);
}
#endif
#endif /* MALLOC_STATS */

/*
 * The low bits of r->p determine the size: 0 means >= page size and
 * r->size holds the real size, otherwise the low bits are the bucket + 1.
 */
#define REALSIZE(sz, r)						\
	(sz) = (uintptr_t)(r)->p & MALLOC_PAGEMASK,		\
	(sz) = ((sz) == 0 ? (r)->size : B2SIZE((sz) - 1))
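
/*
 * Example: a chunk page for bucket 3 is inserted with r->p's low bits
 * set to 4 and r->size pointing at its chunk_info, so REALSIZE yields
 * B2SIZE(4 - 1) == 48.  A large region has zero low bits and REALSIZE
 * yields r->size, the allocation size including any guard.
 */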

static inline size_t
hash(void *p)
{
	size_t sum;
	uintptr_t u;

	u = (uintptr_t)p >> MALLOC_PAGESHIFT;
	sum = u;
	sum = (sum << 7) - sum + (u >> 16);
#ifdef __LP64__
	sum = (sum << 7) - sum + (u >> 32);
	sum = (sum << 7) - sum + (u >> 48);
#endif
	return sum;
}
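
/*
 * hash() above shifts out the page offset bits, then folds the rest of
 * the address with repeated multiply-by-127 steps ((sum << 7) - sum),
 * each mixing in the next 16 address bits, so every part of the
 * address influences the low bits used as the table index.
 */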

static inline struct dir_info *
getpool(void)
{
	if (mopts.malloc_pool[1] == NULL || !mopts.malloc_pool[1]->malloc_mt)
		return mopts.malloc_pool[1];
	else	/* first one reserved for special pool */
		return mopts.malloc_pool[1 + TIB_GET()->tib_tid %
		    (mopts.malloc_mutexes - 1)];
}

static __dead void
wrterror(struct dir_info *d, char *msg, ...)
{
	int		saved_errno = errno;
	va_list		ap;

	dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
	    getpid(), (d != NULL && d->func) ? d->func : "unknown");
	va_start(ap, msg);
	vdprintf(STDERR_FILENO, msg, ap);
	va_end(ap);
	dprintf(STDERR_FILENO, "\n");

#ifdef MALLOC_STATS
	if (DO_STATS && mopts.malloc_verbose)
		malloc_dump();
#endif

	errno = saved_errno;

	abort();
}

static void
rbytes_init(struct dir_info *d)
{
	arc4random_buf(d->rbytes, sizeof(d->rbytes));
	/* add 1 to account for using d->rbytes[0] */
	d->rbytesused = 1 + d->rbytes[0] % (sizeof(d->rbytes) / 2);
}

static inline u_char
getrbyte(struct dir_info *d)
{
	u_char x;

	if (d->rbytesused >= sizeof(d->rbytes))
		rbytes_init(d);
	x = d->rbytes[d->rbytesused++];
	return x;
}

static void
omalloc_parseopt(char opt)
{
	switch (opt) {
	case '+':
		mopts.malloc_mutexes <<= 1;
		if (mopts.malloc_mutexes > _MALLOC_MUTEXES)
			mopts.malloc_mutexes = _MALLOC_MUTEXES;
		break;
	case '-':
		mopts.malloc_mutexes >>= 1;
		if (mopts.malloc_mutexes < 2)
			mopts.malloc_mutexes = 2;
		break;
	case '>':
		mopts.def_maxcache <<= 1;
		if (mopts.def_maxcache > MALLOC_MAXCACHE)
			mopts.def_maxcache = MALLOC_MAXCACHE;
		break;
	case '<':
		mopts.def_maxcache >>= 1;
		break;
	case 'c':
		mopts.chunk_canaries = 0;
		break;
	case 'C':
		mopts.chunk_canaries = 1;
		break;
#ifdef MALLOC_STATS
	case 'd':
		mopts.malloc_stats = 0;
		break;
	case 'D':
	case '1':
		mopts.malloc_stats = 1;
		break;
	case '2':
		mopts.malloc_stats = 2;
		break;
	case '3':
		mopts.malloc_stats = 3;
		break;
	case '4':
		mopts.malloc_stats = 4;
		break;
#endif /* MALLOC_STATS */
	case 'f':
		mopts.malloc_freecheck = 0;
		mopts.malloc_freeunmap = 0;
		break;
	case 'F':
		mopts.malloc_freecheck = 1;
		mopts.malloc_freeunmap = 1;
		break;
	case 'g':
		mopts.malloc_guard = 0;
		break;
	case 'G':
		mopts.malloc_guard = MALLOC_PAGESIZE;
		break;
	case 'j':
		if (mopts.def_malloc_junk > 0)
			mopts.def_malloc_junk--;
		break;
	case 'J':
		if (mopts.def_malloc_junk < 2)
			mopts.def_malloc_junk++;
		break;
	case 'r':
		mopts.malloc_realloc = 0;
		break;
	case 'R':
		mopts.malloc_realloc = 1;
		break;
	case 'u':
		mopts.malloc_freeunmap = 0;
		break;
	case 'U':
		mopts.malloc_freeunmap = 1;
		break;
#ifdef MALLOC_STATS
	case 'v':
		mopts.malloc_verbose = 0;
		break;
	case 'V':
		mopts.malloc_verbose = 1;
		break;
#endif /* MALLOC_STATS */
	case 'x':
		mopts.malloc_xmalloc = 0;
		break;
	case 'X':
		mopts.malloc_xmalloc = 1;
		break;
	default:
		dprintf(STDERR_FILENO, "malloc() warning: "
		    "unknown char in MALLOC_OPTIONS\n");
		break;
	}
}

static void
omalloc_init(void)
{
	const char *p;
	char *q, b[16];
	int i, j;
	const int mib[2] = { CTL_VM, VM_MALLOC_CONF };
	size_t sb;

	/*
	 * Default options
	 */
	mopts.malloc_mutexes = 8;
	mopts.def_malloc_junk = 1;
	mopts.def_maxcache = MALLOC_DEFAULT_CACHE;

	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0:
			sb = sizeof(b);
			j = sysctl(mib, 2, b, &sb, NULL, 0);
			if (j != 0)
				continue;
			p = b;
			break;
		case 1:
			if (issetugid() == 0)
				p = getenv("MALLOC_OPTIONS");
			else
				continue;
			break;
		case 2:
			p = malloc_options;
			break;
		default:
			p = NULL;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case 'S':
				for (q = "CFGJ"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = 0;
				break;
			case 's':
				for (q = "cfgj"; *q != '\0'; q++)
					omalloc_parseopt(*q);
				mopts.def_maxcache = MALLOC_DEFAULT_CACHE;
				break;
			default:
				omalloc_parseopt(*p);
				break;
			}
		}
	}

#ifdef MALLOC_STATS
	if (DO_STATS && (atexit(malloc_exit) == -1)) {
		dprintf(STDERR_FILENO, "malloc() warning: atexit(3) failed."
		    " Will not be able to dump stats on exit\n");
	}
#endif

	while ((mopts.malloc_canary = arc4random()) == 0)
		;
	mopts.junk_loc = arc4random();
	if (mopts.chunk_canaries)
		do {
			mopts.chunk_canaries = arc4random();
		} while ((u_char)mopts.chunk_canaries == 0 ||
		    (u_char)mopts.chunk_canaries == SOME_FREEJUNK);
}

static void
omalloc_poolinit(struct dir_info *d, int mmap_flag)
{
	u_int i, j;

	d->r = NULL;
	d->rbytesused = sizeof(d->rbytes);
	d->regions_free = d->regions_total = 0;
	for (i = 0; i <= BUCKETS; i++) {
		LIST_INIT(&d->chunk_info_list[i]);
		for (j = 0; j < MALLOC_CHUNK_LISTS; j++)
			LIST_INIT(&d->chunk_dir[i][j]);
	}
	d->mmap_flag = mmap_flag;
	d->malloc_junk = mopts.def_malloc_junk;
#ifdef MALLOC_STATS
	RBT_INIT(btshead, &d->btraces);
#endif
	d->canary1 = mopts.malloc_canary ^ (u_int32_t)(uintptr_t)d;
	d->canary2 = ~d->canary1;
}

static int
omalloc_grow(struct dir_info *d)
{
	size_t newtotal;
	size_t newsize;
	size_t mask;
	size_t i, oldpsz;
	struct region_info *p;

	if (d->regions_total > SIZE_MAX / sizeof(struct region_info) / 2)
		return 1;

	newtotal = d->regions_total == 0 ? MALLOC_INITIAL_REGIONS :
	    d->regions_total * 2;
	newsize = PAGEROUND(newtotal * sizeof(struct region_info));
	mask = newtotal - 1;

	/* Don't use the cache here; a user use-after-free must not touch this */
	p = MMAP(newsize, d->mmap_flag);
	if (p == MAP_FAILED)
		return 1;

	STATS_ADD(d->malloc_used, newsize);
	STATS_ZERO(d->inserts);
	STATS_ZERO(d->insert_collisions);
	for (i = 0; i < d->regions_total; i++) {
		void *q = d->r[i].p;
		if (q != NULL) {
			size_t index = hash(q) & mask;
			STATS_INC(d->inserts);
			while (p[index].p != NULL) {
				index = (index - 1) & mask;
				STATS_INC(d->insert_collisions);
			}
			p[index] = d->r[i];
		}
	}

	if (d->regions_total > 0) {
		oldpsz = PAGEROUND(d->regions_total *
		    sizeof(struct region_info));
		/* clear to avoid meta info ending up in the cache */
		unmap(d, d->r, oldpsz, oldpsz);
	}
	d->regions_free += newtotal - d->regions_total;
	d->regions_total = newtotal;
	d->r = p;
	return 0;
}

/*
 * The hashtable uses the assumption that p is never NULL. This holds since
 * non-MAP_FIXED mappings with hint 0 start at BRKSIZ.
 */
static int
insert(struct dir_info *d, void *p, size_t sz, void *f)
{
	size_t index;
	size_t mask;
	void *q;

	if (d->regions_free * 4 < d->regions_total || d->regions_total == 0) {
		if (omalloc_grow(d))
			return 1;
	}
	mask = d->regions_total - 1;
	index = hash(p) & mask;
	q = d->r[index].p;
	STATS_INC(d->inserts);
	while (q != NULL) {
		index = (index - 1) & mask;
		q = d->r[index].p;
		STATS_INC(d->insert_collisions);
	}
	d->r[index].p = p;
	d->r[index].size = sz;
	STATS_SETF(&d->r[index], f);
	d->regions_free--;
	return 0;
}
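
/*
 * The table is open addressed and probes downwards: a lookup for p
 * starts at hash(p) & mask and walks to lower slots (wrapping around)
 * until it hits p or an empty slot.  insert() doubles the table once
 * more than 3/4 of the slots are in use, keeping probe chains short.
 */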

static struct region_info *
find(struct dir_info *d, void *p)
{
	size_t index;
	size_t mask = d->regions_total - 1;
	void *q, *r;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (d->r == NULL)
		return NULL;
	p = MASK_POINTER(p);
	index = hash(p) & mask;
	r = d->r[index].p;
	q = MASK_POINTER(r);
	while (q != p && r != NULL) {
		index = (index - 1) & mask;
		r = d->r[index].p;
		q = MASK_POINTER(r);
	}
	return (q == p && r != NULL) ? &d->r[index] : NULL;
}

static void
delete(struct dir_info *d, struct region_info *ri)
{
	/* algorithm R, Knuth Vol III section 6.4 */
	size_t mask = d->regions_total - 1;
	size_t i, j, r;

	if (d->regions_total & (d->regions_total - 1))
		wrterror(d, "regions_total not 2^x");
	d->regions_free++;
	STATS_INC(d->deletes);

	i = ri - d->r;
	for (;;) {
		d->r[i].p = NULL;
		d->r[i].size = 0;
		j = i;
		for (;;) {
			i = (i - 1) & mask;
			if (d->r[i].p == NULL)
				return;
			r = hash(d->r[i].p) & mask;
			if ((i <= r && r < j) || (r < j && j < i) ||
			    (j < i && i <= r))
				continue;
			d->r[j] = d->r[i];
			STATS_INC(d->delete_moves);
			break;
		}

	}
}
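
/*
 * Deletion note: emptying a slot may break the probe chain of an entry
 * that had collided past it, so the inner loop above walks the chain,
 * moves such an entry into the hole and repeats with the new hole,
 * until an already-empty slot terminates the chain (Knuth's
 * backward-shift deletion, algorithm R).
 */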

static inline void
junk_free(int junk, void *p, size_t sz)
{
	size_t i, step = 1;
	uint64_t *lp = p;

	if (junk == 0 || sz == 0)
		return;
	sz /= sizeof(uint64_t);
	if (junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	/* Do not always put the free junk bytes in the same spot.
	   There is modulo bias here, but we ignore that. */
	for (i = mopts.junk_loc % step; i < sz; i += step)
		lp[i] = SOME_FREEJUNK_ULL;
}
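
/*
 * With junk level 1 only a sample is written: at most a page's worth
 * of the region, one uint64_t out of every step == sz / 4, starting at
 * an offset derived from the random junk_loc.  Level 2 overwrites
 * every uint64_t.  validate_junk() below replays the same pattern to
 * detect writes to freed memory.
 */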

static inline void
validate_junk(struct dir_info *pool, void *p, size_t argsz)
{
	size_t i, sz, step = 1;
	uint64_t *lp = p;

	if (pool->malloc_junk == 0 || argsz == 0)
		return;
	sz = argsz / sizeof(uint64_t);
	if (pool->malloc_junk == 1) {
		if (sz > MALLOC_PAGESIZE / sizeof(uint64_t))
			sz = MALLOC_PAGESIZE / sizeof(uint64_t);
		step = sz / 4;
		if (step == 0)
			step = 1;
	}
	/* see junk_free */
	for (i = mopts.junk_loc % step; i < sz; i += step) {
		if (lp[i] != SOME_FREEJUNK_ULL) {
#ifdef MALLOC_STATS
			if (DO_STATS && argsz <= MALLOC_MAXCHUNK)
				print_chunk_details(pool, lp, argsz, i);
			else
#endif
				wrterror(pool,
				    "write to free mem %p[%zu..%zu]@%zu",
				    lp, i * sizeof(uint64_t),
				    (i + 1) * sizeof(uint64_t) - 1, argsz);
		}
	}
}

/*
 * Cache maintenance.
 * As opposed to the regular region data structure, the sizes in the
 * cache are in MALLOC_PAGESIZE units.
 */
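/*
 * Regions of up to MAX_SMALLCACHEABLE_SIZE pages go into a per-size
 * array of pages; bigger regions, up to MAX_BIGCACHEABLE_SIZE pages,
 * go into a single array of (page, size) slots that is searched.
 * Cached pages are kept junked, or PROT_NONE with option U or F; the
 * lsb of a cached pointer marks pages that were never handed out
 * ("fresh") and so need no junk validation.
 */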
static void
unmap(struct dir_info *d, void *p, size_t sz, size_t clear)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	void *r;
	u_short i;
	struct smallcache *cache;

	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "munmap round");

	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		u_short base = getrbyte(d);
		u_short j;

		/* don't look through all slots */
		for (j = 0; j < d->bigcache_size / 4; j++) {
			i = (base + j) & (d->bigcache_size - 1);
			if (d->bigcache_used <
			    BIGCACHE_FILL(d->bigcache_size))  {
				if (d->bigcache[i].psize == 0)
					break;
			} else {
				if (d->bigcache[i].psize != 0)
					break;
			}
		}
		/* if we didn't find a preferred slot, use random one */
		if (d->bigcache[i].psize != 0) {
			size_t tmp;

			r = d->bigcache[i].page;
			d->bigcache_used -= d->bigcache[i].psize;
			tmp = d->bigcache[i].psize << MALLOC_PAGESHIFT;
			if (!mopts.malloc_freeunmap)
				validate_junk(d, r, tmp);
			if (munmap(r, tmp))
				wrterror(d, "munmap %p", r);
			STATS_SUB(d->malloc_used, tmp);
		}

		if (clear > 0)
			explicit_bzero(p, clear);
		if (mopts.malloc_freeunmap) {
			if (mprotect(p, sz, PROT_NONE))
				wrterror(d, "mprotect %p", p);
		} else
			junk_free(d->malloc_junk, p, sz);
		d->bigcache[i].page = p;
		d->bigcache[i].psize = psz;
		d->bigcache_used += psz;
		return;
	}
	if (psz > MAX_SMALLCACHEABLE_SIZE || d->smallcache[psz - 1].max == 0) {
		if (munmap(p, sz))
			wrterror(d, "munmap %p", p);
		STATS_SUB(d->malloc_used, sz);
		return;
	}
	cache = &d->smallcache[psz - 1];
	if (cache->length == cache->max) {
		int fresh;
		/* use a random slot */
		i = getrbyte(d) & (cache->max - 1);
		r = cache->pages[i];
		fresh = (uintptr_t)r & 1;
		*(uintptr_t*)&r &= ~1UL;
		if (!fresh && !mopts.malloc_freeunmap)
			validate_junk(d, r, sz);
		if (munmap(r, sz))
			wrterror(d, "munmap %p", r);
		STATS_SUB(d->malloc_used, sz);
		cache->length--;
	} else
		i = cache->length;

	/* fill slot */
	if (clear > 0)
		explicit_bzero(p, clear);
	if (mopts.malloc_freeunmap)
		mprotect(p, sz, PROT_NONE);
	else
		junk_free(d->malloc_junk, p, sz);
	cache->pages[i] = p;
	cache->length++;
}

static void *
map(struct dir_info *d, size_t sz, int zero_fill)
{
	size_t psz = sz >> MALLOC_PAGESHIFT;
	u_short i;
	void *p;
	struct smallcache *cache;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");
	if (sz != PAGEROUND(sz) || psz == 0)
		wrterror(d, "map round");


	if (d->bigcache_size > 0 && psz > MAX_SMALLCACHEABLE_SIZE &&
	    psz <= MAX_BIGCACHEABLE_SIZE) {
		size_t base = getrbyte(d);
		size_t cached = d->bigcache_used;
		ushort j;

		for (j = 0; j < d->bigcache_size && cached >= psz; j++) {
			i = (j + base) & (d->bigcache_size - 1);
			if (d->bigcache[i].psize == psz) {
				p = d->bigcache[i].page;
				d->bigcache_used -= psz;
				d->bigcache[i].page = NULL;
				d->bigcache[i].psize = 0;

				if (!mopts.malloc_freeunmap)
					validate_junk(d, p, sz);
				if (mopts.malloc_freeunmap)
					mprotect(p, sz, PROT_READ | PROT_WRITE);
				if (zero_fill)
					memset(p, 0, sz);
				else if (mopts.malloc_freeunmap)
					junk_free(d->malloc_junk, p, sz);
				return p;
			}
			cached -= d->bigcache[i].psize;
		}
	}
	if (psz <= MAX_SMALLCACHEABLE_SIZE && d->smallcache[psz - 1].max > 0) {
		cache = &d->smallcache[psz - 1];
		if (cache->length > 0) {
			int fresh;
			if (cache->length == 1)
				p = cache->pages[--cache->length];
			else {
				i = getrbyte(d) % cache->length;
				p = cache->pages[i];
				cache->pages[i] = cache->pages[--cache->length];
			}
			/* check if the page was not junked, i.e. "fresh";
			   we use the lsb of the pointer for that */
			fresh = (uintptr_t)p & 1UL;
			*(uintptr_t*)&p &= ~1UL;
			if (!fresh && !mopts.malloc_freeunmap)
				validate_junk(d, p, sz);
			if (mopts.malloc_freeunmap)
				mprotect(p, sz, PROT_READ | PROT_WRITE);
			if (zero_fill)
				memset(p, 0, sz);
			else if (mopts.malloc_freeunmap)
				junk_free(d->malloc_junk, p, sz);
			return p;
		}
		if (psz <= 1) {
			p = MMAP(cache->max * sz, d->mmap_flag);
			if (p != MAP_FAILED) {
				STATS_ADD(d->malloc_used, cache->max * sz);
				cache->length = cache->max - 1;
				for (i = 0; i < cache->max - 1; i++) {
					void *q = (char*)p + i * sz;
					cache->pages[i] = q;
					/* mark pointer in slot as not junked */
					*(uintptr_t*)&cache->pages[i] |= 1UL;
				}
				if (mopts.malloc_freeunmap)
					mprotect(p, (cache->max - 1) * sz,
					    PROT_NONE);
				p = (char*)p + (cache->max - 1) * sz;
				/* zero fill not needed, freshly mmapped */
				return p;
			}
		}

	}
	p = MMAP(sz, d->mmap_flag);
	if (p != MAP_FAILED)
		STATS_ADD(d->malloc_used, sz);
	/* zero fill not needed */
	return p;
}

static void
init_chunk_info(struct dir_info *d, struct chunk_info *p, u_int bucket)
{
	u_int i;

	p->bucket = bucket;
	p->total = p->free = MALLOC_PAGESIZE / B2ALLOC(bucket);
	p->offset = howmany(p->total, MALLOC_BITS);
	p->canary = (u_short)d->canary1;

	/* set all valid bits in the bitmap */
	i = p->total - 1;
	memset(p->bits, 0xff, sizeof(p->bits[0]) * (i / MALLOC_BITS));
	p->bits[i / MALLOC_BITS] = (2U << (i % MALLOC_BITS)) - 1;
}
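
/*
 * Bitmap example for init_chunk_info() above, assuming a 4 KiB page:
 * for 1024-byte chunks total == 4, so offset == howmany(4, 16) == 1
 * and the final statement leaves bits[0] == 0xf, the four valid-chunk
 * bits.  For 16-byte chunks total == 256, filling all 16 shorts of the
 * bitmap with 0xffff.
 */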

static struct chunk_info *
alloc_chunk_info(struct dir_info *d, u_int bucket)
{
	struct chunk_info *p;

	if (LIST_EMPTY(&d->chunk_info_list[bucket])) {
		const size_t chunk_pages = 64;
		size_t size, count, i;
		char *q;

		count = MALLOC_PAGESIZE / B2ALLOC(bucket);

		size = howmany(count, MALLOC_BITS);
		/* see declaration of struct chunk_info */
		if (size <= CHUNK_INFO_TAIL)
			size = 0;
		else
			size -= CHUNK_INFO_TAIL;
		size = sizeof(struct chunk_info) + size * sizeof(u_short);
		if (mopts.chunk_canaries && bucket > 0)
			size += count * sizeof(u_short);
		size = _ALIGN(size);
		count = MALLOC_PAGESIZE / size;

		/* Don't use the cache here; a user use-after-free
		   must not touch this */
		if (d->chunk_pages_used == chunk_pages ||
		     d->chunk_pages == NULL) {
			q = MMAP(MALLOC_PAGESIZE * chunk_pages, d->mmap_flag);
			if (q == MAP_FAILED)
				return NULL;
			d->chunk_pages = q;
			d->chunk_pages_used = 0;
			STATS_ADD(d->malloc_used, MALLOC_PAGESIZE *
			    chunk_pages);
		}
		q = (char *)d->chunk_pages + d->chunk_pages_used *
		    MALLOC_PAGESIZE;
		d->chunk_pages_used++;

		for (i = 0; i < count; i++, q += size) {
			p = (struct chunk_info *)q;
			LIST_INSERT_HEAD(&d->chunk_info_list[bucket], p,
			    entries);
		}
	}
	p = LIST_FIRST(&d->chunk_info_list[bucket]);
	LIST_REMOVE(p, entries);
	if (p->total == 0)
		init_chunk_info(d, p, bucket);
	return p;
}
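
/*
 * Sizing example for alloc_chunk_info() above, with 16-byte chunks and
 * assuming a 4 KiB page: the bitmap needs howmany(256, 16) == 16
 * shorts, CHUNK_INFO_TAIL of which already sit inside struct
 * chunk_info; option C adds another 256 shorts for the requested-size
 * table.  Each metadata page is then carved into as many such structs
 * as fit.
 */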

/*
 * Allocate a page of chunks
 */
static struct chunk_info *
omalloc_make_chunks(struct dir_info *d, u_int bucket, u_int listnum)
{
	struct chunk_info *bp;
	void *pp;
	void *ff = NULL;

	/* Allocate a new bucket */
	pp = map(d, MALLOC_PAGESIZE, 0);
	if (pp == MAP_FAILED)
		return NULL;
	if (DO_STATS) {
		ff = map(d, MALLOC_PAGESIZE, 0);
		if (ff == MAP_FAILED)
			goto err;
		memset(ff, 0, sizeof(void *) * MALLOC_PAGESIZE /
		    B2ALLOC(bucket));
	}

	/* memory protect the page allocated in the malloc(0) case */
	if (bucket == 0 && mprotect(pp, MALLOC_PAGESIZE, PROT_NONE) == -1)
		goto err;

	bp = alloc_chunk_info(d, bucket);
	if (bp == NULL)
		goto err;
	bp->page = pp;

	if (insert(d, (void *)((uintptr_t)pp | (bucket + 1)), (uintptr_t)bp,
	    ff))
		goto err;
	LIST_INSERT_HEAD(&d->chunk_dir[bucket][listnum], bp, entries);

	if (bucket > 0 && d->malloc_junk != 0)
		memset(pp, SOME_FREEJUNK, MALLOC_PAGESIZE);

	return bp;

err:
	unmap(d, pp, MALLOC_PAGESIZE, 0);
	if (ff != NULL && ff != MAP_FAILED)
		unmap(d, ff, MALLOC_PAGESIZE, 0);
	return NULL;
}

/* using built-in function version */
static inline unsigned int
lb(u_int x)
{
	/* I need an extension just for integer-length (: */
	return (sizeof(int) * CHAR_BIT - 1) - __builtin_clz(x);
}

/* https://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/
   via Tony Finch */
static inline unsigned int
bin_of(unsigned int size)
{
	const unsigned int linear = 6;
	const unsigned int subbin = 2;

	unsigned int mask, rounded, rounded_size;
	unsigned int n_bits, shift;

	n_bits = lb(size | (1U << linear));
	shift = n_bits - subbin;
	mask = (1ULL << shift) - 1;
	rounded = size + mask; /* XXX: overflow. */

	rounded_size = rounded & ~mask;
	return rounded_size;
}

static inline u_short
find_bucket(u_short size)
{
	/* malloc(0) is special */
	if (size == 0)
		return 0;
	if (size < MALLOC_MINSIZE)
		size = MALLOC_MINSIZE;
	if (mopts.def_maxcache != 0)
		size = bin_of(size);
	return howmany(size, MALLOC_MINSIZE);
}
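
/*
 * Despite its name, bin_of() returns a rounded size, not an index:
 * sizes are rounded up so that only four sub-bins per power of two
 * remain.  Example: find_bucket(300) computes n_bits == 8 and
 * shift == 6, rounding 300 up to 320, a multiple of 64, and returns
 * howmany(320, 16) == 20, so malloc(300) is served from the 320-byte
 * bucket.
 */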

static void
fill_canary(char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	memset(ptr + sz, mopts.chunk_canaries, check_sz);
}
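
/*
 * fill_canary() writes the random canary byte into the slack between
 * the requested size and the chunk size, capped at CHUNK_CHECK_LENGTH
 * bytes; validate_canary() below checks the same window byte for byte,
 * so a one-byte overflow past the requested size is caught on free.
 * SOME_JUNK bytes are also accepted there, since with junk level 2 the
 * slack is junk-filled instead of canary-filled.
 */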

/*
 * Allocate a chunk
 */
static void *
malloc_bytes(struct dir_info *d, size_t size)
{
	u_int i, j, k, r, bucket, listnum;
	u_short *lp;
	struct chunk_info *bp;
	void *p;

	if (mopts.malloc_canary != (d->canary1 ^ (u_int32_t)(uintptr_t)d) ||
	    d->canary1 != ~d->canary2)
		wrterror(d, "internal struct corrupt");

	bucket = find_bucket(size);

	r = getrbyte(d);
	listnum = r % MALLOC_CHUNK_LISTS;

	/* If it's empty, make a page more of that size chunks */
	if ((bp = LIST_FIRST(&d->chunk_dir[bucket][listnum])) == NULL) {
		bp = omalloc_make_chunks(d, bucket, listnum);
		if (bp == NULL)
			return NULL;
	}

	if (bp->canary != (u_short)d->canary1 || bucket != bp->bucket)
		wrterror(d, "chunk info corrupted");

	r /= MALLOC_CHUNK_LISTS;
	/* do we need more random bits? */
	if (bp->total > 256 / MALLOC_CHUNK_LISTS)
		r = r << 8 | getrbyte(d);
	/* bias, as bp->total is not a power of 2 */
	i = r % bp->total;

	j = i % MALLOC_BITS;
	i /= MALLOC_BITS;
	lp = &bp->bits[i];
	/* potentially start somewhere in a short */
	if (j > 0 && *lp >> j)
		k = ffs(*lp >> j) + j;
	else {
		/* no bit halfway, go to next full short */
		for (;;) {
			if (*lp) {
				k = ffs(*lp);
				break;
			}
			if (++i >= bp->offset)
				i = 0;
			lp = &bp->bits[i];
		}
	}
	*lp ^= 1 << --k;

	/* If there are no more free, remove from free-list */
	if (--bp->free == 0)
		LIST_REMOVE(bp, entries);

	/* Adjust to the real offset of that chunk */
	k += i * MALLOC_BITS;

	if (mopts.chunk_canaries && size > 0)
		bp->bits[bp->offset + k] = size;

	if (DO_STATS) {
		struct region_info *r = find(d, bp->page);
		STATS_SETFN(r, k, d->caller);
	}

	p = (char *)bp->page + k * B2ALLOC(bucket);
	if (bucket > 0) {
		validate_junk(d, p, B2SIZE(bucket));
		if (mopts.chunk_canaries)
			fill_canary(p, size, B2SIZE(bucket));
	}
	return p;
}

static void
validate_canary(struct dir_info *d, u_char *ptr, size_t sz, size_t allocated)
{
	size_t check_sz = allocated - sz;
	u_char *p, *q;

	if (check_sz > CHUNK_CHECK_LENGTH)
		check_sz = CHUNK_CHECK_LENGTH;
	p = ptr + sz;
	q = p + check_sz;

	while (p < q) {
		if (*p != (u_char)mopts.chunk_canaries && *p != SOME_JUNK) {
			wrterror(d, "canary corrupted %p[%tu]@%zu/%zu%s",
			    ptr, p - ptr, sz, allocated,
			    *p == SOME_FREEJUNK ? " (double free?)" : "");
		}
		p++;
	}
}

static inline uint32_t
find_chunknum(struct dir_info *d, struct chunk_info *info, void *ptr, int check)
{
	uint32_t chunknum;

	if (info->canary != (u_short)d->canary1)
		wrterror(d, "chunk info corrupted");

	/* Find the chunk number on the page */
	chunknum = ((uintptr_t)ptr & MALLOC_PAGEMASK) / B2ALLOC(info->bucket);

	if ((uintptr_t)ptr & (MALLOC_MINSIZE - 1))
		wrterror(d, "modified chunk-pointer %p", ptr);
	if (CHUNK_FREE(info, chunknum))
		wrterror(d, "double free %p", ptr);
	if (check && info->bucket > 0) {
		validate_canary(d, ptr, info->bits[info->offset + chunknum],
		    B2SIZE(info->bucket));
	}
	return chunknum;
}

/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */
static void
free_bytes(struct dir_info *d, struct region_info *r, void *ptr)
{
	struct chunk_head *mp;
	struct chunk_info *info;
	uint32_t chunknum;
	uint32_t listnum;

	info = (struct chunk_info *)r->size;
	chunknum = find_chunknum(d, info, ptr, 0);

	info->bits[chunknum / MALLOC_BITS] |= 1U << (chunknum % MALLOC_BITS);
	info->free++;

	if (info->free == 1) {
		/* Page became non-full */
		listnum = getrbyte(d) % MALLOC_CHUNK_LISTS;
		mp = &d->chunk_dir[info->bucket][listnum];
		LIST_INSERT_HEAD(mp, info, entries);
		return;
	}

	if (info->free != info->total)
		return;

	LIST_REMOVE(info, entries);

	if (info->bucket == 0 && !mopts.malloc_freeunmap)
		mprotect(info->page, MALLOC_PAGESIZE, PROT_READ | PROT_WRITE);
	unmap(d, info->page, MALLOC_PAGESIZE, 0);
#ifdef MALLOC_STATS
	if (r->f != NULL) {
		unmap(d, r->f, MALLOC_PAGESIZE, MALLOC_PAGESIZE);
		r->f = NULL;
	}
#endif

	delete(d, r);
	mp = &d->chunk_info_list[info->bucket];
	LIST_INSERT_HEAD(mp, info, entries);
}

static void *
omalloc(struct dir_info *pool, size_t sz, int zero_fill)
{
	void *p, *caller = NULL;
	size_t psz;

	if (sz > MALLOC_MAXCHUNK) {
		if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
			errno = ENOMEM;
			return NULL;
		}
		sz += mopts.malloc_guard;
		psz = PAGEROUND(sz);
		p = map(pool, psz, zero_fill);
		if (p == MAP_FAILED) {
			errno = ENOMEM;
			return NULL;
		}
#ifdef MALLOC_STATS
		if (DO_STATS)
			caller = pool->caller;
#endif
		if (insert(pool, p, sz, caller)) {
			unmap(pool, p, psz, 0);
			errno = ENOMEM;
			return NULL;
		}
		if (mopts.malloc_guard) {
			if (mprotect((char *)p + psz - mopts.malloc_guard,
			    mopts.malloc_guard, PROT_NONE))
				wrterror(pool, "mprotect");
			STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
		}

		if (MALLOC_MOVE_COND(sz)) {
			/* fill whole allocation */
			if (pool->malloc_junk == 2)
				memset(p, SOME_JUNK, psz - mopts.malloc_guard);
			/* shift towards the end */
			p = MALLOC_MOVE(p, sz);
			/* fill zeros if needed and overwritten above */
			if (zero_fill && pool->malloc_junk == 2)
				memset(p, 0, sz - mopts.malloc_guard);
		} else {
			if (pool->malloc_junk == 2) {
				if (zero_fill)
					memset((char *)p + sz -
					    mopts.malloc_guard, SOME_JUNK,
					    psz - sz);
				else
					memset(p, SOME_JUNK,
					    psz - mopts.malloc_guard);
			} else if (mopts.chunk_canaries)
				fill_canary(p, sz - mopts.malloc_guard,
				    psz - mopts.malloc_guard);
		}

	} else {
		/* takes care of SOME_JUNK */
		p = malloc_bytes(pool, sz);
		if (zero_fill && p != NULL && sz > 0)
			memset(p, 0, sz);
	}

	return p;
}

/*
 * Common function for handling recursion.  Only
 * print the error message once, to avoid making the problem
 * potentially worse.
 */
static void
malloc_recurse(struct dir_info *d)
{
	static int noprint;

	if (noprint == 0) {
		noprint = 1;
		wrterror(d, "recursive call");
	}
	d->active--;
	_MALLOC_UNLOCK(d->mutex);
	errno = EDEADLK;
}

void
_malloc_init(int from_rthreads)
{
	u_int i, j, nmutexes;
	struct dir_info *d;

	_MALLOC_LOCK(1);
	if (!from_rthreads && mopts.malloc_pool[1]) {
		_MALLOC_UNLOCK(1);
		return;
	}
	if (!mopts.malloc_canary) {
		char *p;
		size_t sz, roundup_sz, d_avail;

		omalloc_init();
		/*
		 * Allocate dir_infos with a guard page on either side. Also
		 * randomise the offset inside the page at which the dir_infos
		 * lie (subject to alignment by 1 << MALLOC_MINSHIFT).
		 */
		sz = mopts.malloc_mutexes * sizeof(*d);
		roundup_sz = (sz + MALLOC_PAGEMASK) & ~MALLOC_PAGEMASK;
		if ((p = MMAPNONE(roundup_sz + 2 * MALLOC_PAGESIZE, 0)) ==
		    MAP_FAILED)
			wrterror(NULL, "malloc_init mmap1 failed");
		if (mprotect(p + MALLOC_PAGESIZE, roundup_sz,
		    PROT_READ | PROT_WRITE))
			wrterror(NULL, "malloc_init mprotect1 failed");
		if (mimmutable(p, roundup_sz + 2 * MALLOC_PAGESIZE))
			wrterror(NULL, "malloc_init mimmutable1 failed");
		d_avail = (roundup_sz - sz) >> MALLOC_MINSHIFT;
		d = (struct dir_info *)(p + MALLOC_PAGESIZE +
		    (arc4random_uniform(d_avail) << MALLOC_MINSHIFT));
		STATS_ADD(d[1].malloc_used, roundup_sz + 2 * MALLOC_PAGESIZE);
		for (i = 0; i < mopts.malloc_mutexes; i++)
			mopts.malloc_pool[i] = &d[i];
		mopts.internal_funcs = 1;
		if (((uintptr_t)&malloc_readonly & MALLOC_PAGEMASK) == 0) {
			if (mprotect(&malloc_readonly, sizeof(malloc_readonly),
			    PROT_READ))
				wrterror(NULL,
				    "malloc_init mprotect r/o failed");
			if (mimmutable(&malloc_readonly,
			    sizeof(malloc_readonly)))
				wrterror(NULL,
				    "malloc_init mimmutable r/o failed");
		}
	}

	nmutexes = from_rthreads ? mopts.malloc_mutexes : 2;
	for (i = 0; i < nmutexes; i++) {
		d = mopts.malloc_pool[i];
		d->malloc_mt = from_rthreads;
		if (d->canary1 == ~d->canary2)
			continue;
		if (i == 0) {
			omalloc_poolinit(d, MAP_CONCEAL);
			d->malloc_junk = 2;
			d->bigcache_size = 0;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++)
				d->smallcache[j].max = 0;
		} else {
			size_t sz = 0;

			omalloc_poolinit(d, 0);
			d->malloc_junk = mopts.def_malloc_junk;
			d->bigcache_size = mopts.def_maxcache;
			for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
				d->smallcache[j].max =
				    mopts.def_maxcache >> (j / 8);
				sz += d->smallcache[j].max * sizeof(void *);
			}
			sz += d->bigcache_size * sizeof(struct bigcache);
			if (sz > 0) {
				void *p = MMAP(sz, 0);
				if (p == MAP_FAILED)
					wrterror(NULL,
					    "malloc_init mmap2 failed");
				if (mimmutable(p, sz))
					wrterror(NULL,
					    "malloc_init mimmutable2 failed");
				for (j = 0; j < MAX_SMALLCACHEABLE_SIZE; j++) {
					d->smallcache[j].pages = p;
					p = (char *)p + d->smallcache[j].max *
					    sizeof(void *);
				}
				d->bigcache = p;
			}
		}
		d->mutex = i;
	}

	_MALLOC_UNLOCK(1);
}
DEF_STRONG(_malloc_init);

#define PROLOGUE(p, fn)			\
	d = (p);			\
	if (d == NULL) {		\
		_malloc_init(0);	\
		d = (p);		\
	}				\
	_MALLOC_LOCK(d->mutex);		\
	d->func = fn;			\
	if (d->active++) {		\
		malloc_recurse(d);	\
		return NULL;		\
	}				\

#define EPILOGUE()				\
	d->active--;				\
	_MALLOC_UNLOCK(d->mutex);		\
	if (r == NULL && mopts.malloc_xmalloc)	\
		wrterror(d, "out of memory");	\
	if (r != NULL)				\
		errno = saved_errno;		\

void *
malloc(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(getpool(), "malloc")
	SET_CALLER(d, caller(d));
	r = omalloc(d, size, 0);
	EPILOGUE()
	return r;
}
DEF_STRONG(malloc);

void *
malloc_conceal(size_t size)
{
	void *r;
	struct dir_info *d;
	int saved_errno = errno;

	PROLOGUE(mopts.malloc_pool[0], "malloc_conceal")
	SET_CALLER(d, caller(d));
	r = omalloc(d, size, 0);
	EPILOGUE()
	return r;
}
DEF_WEAK(malloc_conceal);

static struct region_info *
findpool(void *p, struct dir_info *argpool, struct dir_info **foundpool,
    const char ** saved_function)
{
	struct dir_info *pool = argpool;
	struct region_info *r = find(pool, p);

	if (r == NULL) {
		u_int i, nmutexes;

		nmutexes = mopts.malloc_pool[1]->malloc_mt ?
		    mopts.malloc_mutexes : 2;
		for (i = 1; i < nmutexes; i++) {
			u_int j = (argpool->mutex + i) & (nmutexes - 1);

			pool->active--;
			_MALLOC_UNLOCK(pool->mutex);
			pool = mopts.malloc_pool[j];
			_MALLOC_LOCK(pool->mutex);
			pool->active++;
			r = find(pool, p);
			if (r != NULL) {
				*saved_function = pool->func;
				pool->func = argpool->func;
				break;
			}
		}
		if (r == NULL)
			wrterror(argpool, "bogus pointer (double free?) %p", p);
	}
	*foundpool = pool;
	return r;
}

static void
ofree(struct dir_info **argpool, void *p, int clear, int check, size_t argsz)
{
	struct region_info *r;
	struct dir_info *pool;
	const char *saved_function;
	size_t sz;

	r = findpool(p, *argpool, &pool, &saved_function);

	REALSIZE(sz, r);
	if (pool->mmap_flag) {
		clear = 1;
		if (!check) {
			argsz = sz;
			if (sz > MALLOC_MAXCHUNK)
				argsz -= mopts.malloc_guard;
		}
	}
	if (check) {
		if (sz <= MALLOC_MAXCHUNK) {
			if (mopts.chunk_canaries && sz > 0) {
				struct chunk_info *info =
				    (struct chunk_info *)r->size;
				uint32_t chunknum =
				    find_chunknum(pool, info, p, 0);

				if (info->bits[info->offset + chunknum] < argsz)
					wrterror(pool, "recorded size %hu"
					    " < %zu",
					    info->bits[info->offset + chunknum],
					    argsz);
			} else {
				if (sz < argsz)
					wrterror(pool, "chunk size %zu < %zu",
					    sz, argsz);
			}
		} else if (sz - mopts.malloc_guard < argsz) {
			wrterror(pool, "recorded size %zu < %zu",
			    sz - mopts.malloc_guard, argsz);
		}
	}
	if (sz > MALLOC_MAXCHUNK) {
		if (!MALLOC_MOVE_COND(sz)) {
			if (r->p != p)
				wrterror(pool, "bogus pointer %p", p);
			if (mopts.chunk_canaries)
				validate_canary(pool, p,
				    sz - mopts.malloc_guard,
				    PAGEROUND(sz - mopts.malloc_guard));
		} else {
			/* shifted towards the end */
			if (p != MALLOC_MOVE(r->p, sz))
				wrterror(pool, "bogus moved pointer %p", p);
			p = r->p;
		}
		if (mopts.malloc_guard) {
			if (sz < mopts.malloc_guard)
				wrterror(pool, "guard size");
			if (!mopts.malloc_freeunmap) {
				if (mprotect((char *)p + PAGEROUND(sz) -
				    mopts.malloc_guard, mopts.malloc_guard,
				    PROT_READ | PROT_WRITE))
					wrterror(pool, "mprotect");
			}
			STATS_SUB(pool->malloc_guarded, mopts.malloc_guard);
		}
		unmap(pool, p, PAGEROUND(sz), clear ? argsz : 0);
		delete(pool, r);
	} else {
		void *tmp;
		u_int i;

		/* Validate and optionally canary check */
		struct chunk_info *info = (struct chunk_info *)r->size;
		if (B2SIZE(info->bucket) != sz)
			wrterror(pool, "internal struct corrupt");
		find_chunknum(pool, info, p, mopts.chunk_canaries);

		if (mopts.malloc_freecheck) {
			for (i = 0; i <= MALLOC_DELAYED_CHUNK_MASK; i++) {
				tmp = pool->delayed_chunks[i];
				if (tmp == p)
					wrterror(pool,
					    "double free %p", p);
				if (tmp != NULL) {
					size_t tmpsz;

					r = find(pool, tmp);
					if (r == NULL)
						wrterror(pool,
						    "bogus pointer ("
						    "double free?) %p", tmp);
					REALSIZE(tmpsz, r);
					validate_junk(pool, tmp, tmpsz);
				}
			}
		}

		if (clear && argsz > 0)
			explicit_bzero(p, argsz);
		junk_free(pool->malloc_junk, p, sz);
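
		/*
		 * Swap p into a random slot of the 16-entry delayed-free
		 * ring and actually free whatever chunk that slot held
		 * before; this delays and randomises reuse, and
		 * validate_junk() catches writes made while the evicted
		 * chunk sat in the ring.
		 */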
1685        i = getrbyte(pool) & MALLOC_DELAYED_CHUNK_MASK;
1686        tmp = p;
1687        p = pool->delayed_chunks[i];
1688        if (tmp == p)
1689            wrterror(pool, "double free %p", p);
1690        pool->delayed_chunks[i] = tmp;
1691        if (p != NULL) {
1692            r = find(pool, p);
1693            if (r == NULL)
1694                wrterror(pool,
1695                    "bogus pointer (double free?) %p", p);
1696            if (!mopts.malloc_freecheck) {
1697                REALSIZE(sz, r);
1698                validate_junk(pool, p, sz);
1699            }
1700            free_bytes(pool, r, p);
1701        }
1702    }
1703
1704    if (*argpool != pool) {
1705        pool->func = saved_function;
1706        *argpool = pool;
1707    }
1708}
1709
1710void
1711free(void *ptr)
1712{
1713    struct dir_info *d;
1714    int saved_errno = errno;
1715
1716    /* This is legal. */
1717    if (ptr == NULL)
1718        return;
1719
1720    d = getpool();
1721    if (d == NULL)
1722        wrterror(d, "free() called before allocation");
1723    _MALLOC_LOCK(d->mutex);
1724    d->func = "free";
1725    if (d->active++) {
1726        malloc_recurse(d);
1727        return;
1728    }
1729    ofree(&d, ptr, 0, 0, 0);
1730    d->active--;
1731    _MALLOC_UNLOCK(d->mutex);
1732    errno = saved_errno;
1733}
1734DEF_STRONG(free);
1735
1736static void
1737freezero_p(void *ptr, size_t sz)
1738{
1739    explicit_bzero(ptr, sz);
1740    free(ptr);
1741}
1742
1743void
1744freezero(void *ptr, size_t sz)
1745{
1746    struct dir_info *d;
1747    int saved_errno = errno;
1748
1749    /* This is legal. */
1750    if (ptr == NULL)
1751        return;
1752
1753    if (!mopts.internal_funcs) {
1754        freezero_p(ptr, sz);
1755        return;
1756    }
1757
1758    d = getpool();
1759    if (d == NULL)
1760        wrterror(d, "freezero() called before allocation");
1761    _MALLOC_LOCK(d->mutex);
1762    d->func = "freezero";
1763    if (d->active++) {
1764        malloc_recurse(d);
1765        return;
1766    }
1767    ofree(&d, ptr, 1, 1, sz);
1768    d->active--;
1769    _MALLOC_UNLOCK(d->mutex);
1770    errno = saved_errno;
1771}
1772DEF_WEAK(freezero);
1773
1774static void *
1775orealloc(struct dir_info **argpool, void *p, size_t newsz)
1776{
1777    struct region_info *r;
1778    struct dir_info *pool;
1779    const char *saved_function;
1780    struct chunk_info *info;
1781    size_t oldsz, goldsz, gnewsz;
1782    void *q, *ret;
1783    uint32_t chunknum;
1784    int forced;
1785
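    /* realloc(NULL, newsz) behaves like malloc(newsz) */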
1786    if (p == NULL)
1787        return omalloc(*argpool, newsz, 0);
1788
1789    if (newsz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
1790        errno = ENOMEM;
        return NULL;
1792    }
1793
1794    r = findpool(p, *argpool, &pool, &saved_function);
1795
1796    REALSIZE(oldsz, r);
1797    if (oldsz <= MALLOC_MAXCHUNK) {
1798        if (DO_STATS || mopts.chunk_canaries) {
1799            info = (struct chunk_info *)r->size;
1800            chunknum = find_chunknum(pool, info, p, 0);
1801        }
1802    }
1803
1804    goldsz = oldsz;
1805    if (oldsz > MALLOC_MAXCHUNK) {
1806        if (oldsz < mopts.malloc_guard)
1807            wrterror(pool, "guard size");
1808        oldsz -= mopts.malloc_guard;
1809    }
1810
1811    gnewsz = newsz;
1812    if (gnewsz > MALLOC_MAXCHUNK)
1813        gnewsz += mopts.malloc_guard;
1814
1815    forced = mopts.malloc_realloc || pool->mmap_flag;
1816    if (newsz > MALLOC_MAXCHUNK && oldsz > MALLOC_MAXCHUNK && !forced) {
        /*
         * Both the old and new allocations span whole pages: try to
         * grow, shrink or keep the region in place as appropriate.
         */
1819        size_t roldsz = PAGEROUND(goldsz);
1820        size_t rnewsz = PAGEROUND(gnewsz);
1821
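        /*
         * Shrinking to more than half the current region while
         * saving less than the cache limit: not worth remapping,
         * keep the region as is (only done without guard pages).
         */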
1822        if (rnewsz < roldsz && rnewsz > roldsz / 2 &&
1823            roldsz - rnewsz < mopts.def_maxcache * MALLOC_PAGESIZE &&
1824            !mopts.malloc_guard) {
1825
1826            ret = p;
1827            goto done;
1828        }
1829
1830        if (rnewsz > roldsz) {
1831            /* try to extend existing region */
1832            if (!mopts.malloc_guard) {
1833                void *hint = (char *)r->p + roldsz;
1834                size_t needed = rnewsz - roldsz;
1835
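                /*
                 * MAP_FIXED together with __MAP_NOREPLACE asks the
                 * kernel to map at the hint address, but to fail
                 * instead of replacing an existing mapping there.
                 */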
1836                STATS_INC(pool->cheap_realloc_tries);
1837                q = MMAPA(hint, needed, MAP_FIXED |
1838                    __MAP_NOREPLACE | pool->mmap_flag);
1839                if (q == hint) {
1840                    STATS_ADD(pool->malloc_used, needed);
1841                    if (pool->malloc_junk == 2)
1842                        memset(q, SOME_JUNK, needed);
1843                    r->size = gnewsz;
1844                    if (r->p != p) {
1845                        /* old pointer is moved */
1846                        memmove(r->p, p, oldsz);
1847                        p = r->p;
1848                    }
1849                    if (mopts.chunk_canaries)
1850                        fill_canary(p, newsz,
1851                            PAGEROUND(newsz));
1852                    STATS_SETF(r, (*argpool)->caller);
1853                    STATS_INC(pool->cheap_reallocs);
1854                    ret = p;
1855                    goto done;
1856                }
1857            }
1858        } else if (rnewsz < roldsz) {
1859            /* shrink number of pages */
1860            if (mopts.malloc_guard) {
1861                if (mprotect((char *)r->p + rnewsz -
1862                    mopts.malloc_guard, mopts.malloc_guard,
1863                    PROT_NONE))
1864                    wrterror(pool, "mprotect");
1865            }
1866            if (munmap((char *)r->p + rnewsz, roldsz - rnewsz))
1867                wrterror(pool, "munmap %p", (char *)r->p +
1868                    rnewsz);
1869            STATS_SUB(pool->malloc_used, roldsz - rnewsz);
1870            r->size = gnewsz;
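            /*
             * If the shrunken size now qualifies for MALLOC_MOVE,
             * slide the allocation towards the end of its page.
             */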
1871            if (MALLOC_MOVE_COND(gnewsz)) {
1872                void *pp = MALLOC_MOVE(r->p, gnewsz);
1873                memmove(pp, p, newsz);
1874                p = pp;
1875            } else if (mopts.chunk_canaries)
1876                fill_canary(p, newsz, PAGEROUND(newsz));
1877            STATS_SETF(r, (*argpool)->caller);
1878            ret = p;
1879            goto done;
1880        } else {
1881            /* number of pages remains the same */
1882            void *pp = r->p;
1883
1884            r->size = gnewsz;
1885            if (MALLOC_MOVE_COND(gnewsz))
1886                pp = MALLOC_MOVE(r->p, gnewsz);
1887            if (p != pp) {
1888                memmove(pp, p, oldsz < newsz ? oldsz : newsz);
1889                p = pp;
1890            }
1891            if (p == r->p) {
1892                if (newsz > oldsz && pool->malloc_junk == 2)
1893                    memset((char *)p + newsz, SOME_JUNK,
1894                        rnewsz - mopts.malloc_guard -
1895                        newsz);
1896                if (mopts.chunk_canaries)
1897                    fill_canary(p, newsz, PAGEROUND(newsz));
1898            }
1899            STATS_SETF(r, (*argpool)->caller);
1900            ret = p;
1901            goto done;
1902        }
1903    }
1904    if (oldsz <= MALLOC_MAXCHUNK && oldsz > 0 &&
1905        newsz <= MALLOC_MAXCHUNK && newsz > 0 &&
1906        !forced && find_bucket(newsz) == find_bucket(oldsz)) {
        /* do not reallocate if the new size fits well in the existing chunk */
1908        if (pool->malloc_junk == 2)
1909            memset((char *)p + newsz, SOME_JUNK, oldsz - newsz);
1910        if (mopts.chunk_canaries) {
1911            info->bits[info->offset + chunknum] = newsz;
1912            fill_canary(p, newsz, B2SIZE(info->bucket));
1913        }
1914        if (DO_STATS)
1915            STATS_SETFN(r, chunknum, (*argpool)->caller);
1916        ret = p;
1917    } else if (newsz != oldsz || forced) {
1918        /* create new allocation */
1919        q = omalloc(pool, newsz, 0);
1920        if (q == NULL) {
1921            ret = NULL;
1922            goto done;
1923        }
1924        if (newsz != 0 && oldsz != 0)
1925            memcpy(q, p, oldsz < newsz ? oldsz : newsz);
1926        ofree(&pool, p, 0, 0, 0);
1927        ret = q;
1928    } else {
        /*
         * oldsz == newsz and not forced.  All such cases with a
         * non-zero size were handled above, so only zero-sized
         * allocations may legitimately end up here.
         */
1930        if (newsz != 0)
1931            wrterror(pool, "realloc internal inconsistency");
1932        if (DO_STATS)
1933            STATS_SETFN(r, chunknum, (*argpool)->caller);
1934        ret = p;
1935    }
1936done:
1937    if (*argpool != pool) {
1938        pool->func = saved_function;
1939        *argpool = pool;
1940    }
1941    return ret;
1942}
1943
1944void *
1945realloc(void *ptr, size_t size)
1946{
1947    struct dir_info *d;
1948    void *r;
1949    int saved_errno = errno;
1950
1951    PROLOGUE(getpool(), "realloc")
1952    SET_CALLER(d, caller(d));
1953    r = orealloc(&d, ptr, size);
1954    EPILOGUE()
1955    return r;
1956}
1957DEF_STRONG(realloc);
1958
1959/*
1960 * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
1961 * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
1962 */
1963#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
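/*
 * Illustrative use of the check (not a code change): on 64-bit
 * systems MUL_NO_OVERFLOW is 2^32, so in
 *
 *	if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
 *	    nmemb > 0 && SIZE_MAX / nmemb < size)
 *
 * the costly division only runs when an operand is at least 2^32;
 * the product of two smaller operands cannot exceed SIZE_MAX.
 */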
1964
1965void *
1966calloc(size_t nmemb, size_t size)
1967{
1968    struct dir_info *d;
1969    void *r;
1970    int saved_errno = errno;
1971
1972    PROLOGUE(getpool(), "calloc")
1973    SET_CALLER(d, caller(d));
1974    if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
1975        nmemb > 0 && SIZE_MAX / nmemb < size) {
1976        d->active--;
1977        _MALLOC_UNLOCK(d->mutex);
1978        if (mopts.malloc_xmalloc)
1979            wrterror(d, "out of memory");
1980        errno = ENOMEM;
1981        return NULL;
1982    }
1983
1984    size *= nmemb;
1985    r = omalloc(d, size, 1);
1986    EPILOGUE()
1987    return r;
1988}
1989DEF_STRONG(calloc);
1990
1991void *
1992calloc_conceal(size_t nmemb, size_t size)
1993{
1994    struct dir_info *d;
1995    void *r;
1996    int saved_errno = errno;
1997
1998    PROLOGUE(mopts.malloc_pool[0], "calloc_conceal")
1999    SET_CALLER(d, caller(d));
2000    if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2001        nmemb > 0 && SIZE_MAX / nmemb < size) {
2002        d->active--;
2003        _MALLOC_UNLOCK(d->mutex);
2004        if (mopts.malloc_xmalloc)
2005            wrterror(d, "out of memory");
2006        errno = ENOMEM;
2007        return NULL;
2008    }
2009
2010    size *= nmemb;
2011    r = omalloc(d, size, 1);
2012    EPILOGUE()
2013    return r;
2014}
2015DEF_WEAK(calloc_conceal);
2016
2017static void *
2018orecallocarray(struct dir_info **argpool, void *p, size_t oldsize,
2019    size_t newsize)
2020{
2021    struct region_info *r;
2022    struct dir_info *pool;
2023    const char *saved_function;
2024    void *newptr;
2025    size_t sz;
2026
2027    if (p == NULL)
2028        return omalloc(*argpool, newsize, 1);
2029
2030    if (oldsize == newsize)
2031        return p;
2032
2033    r = findpool(p, *argpool, &pool, &saved_function);
2034
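    /*
     * Cross-check the caller-supplied oldsize against the size
     * recorded by the allocator for this region or chunk.
     */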
2035    REALSIZE(sz, r);
2036    if (sz <= MALLOC_MAXCHUNK) {
2037        if (mopts.chunk_canaries && sz > 0) {
2038            struct chunk_info *info = (struct chunk_info *)r->size;
2039            uint32_t chunknum = find_chunknum(pool, info, p, 0);
2040
2041            if (info->bits[info->offset + chunknum] != oldsize)
2042                wrterror(pool, "recorded size %hu != %zu",
2043                    info->bits[info->offset + chunknum],
2044                    oldsize);
2045        } else {
2046            if (sz < oldsize)
2047                wrterror(pool, "chunk size %zu < %zu",
2048                    sz, oldsize);
2049        }
2050    } else {
2051        if (sz - mopts.malloc_guard < oldsize)
2052            wrterror(pool, "recorded size %zu < %zu",
2053                sz - mopts.malloc_guard, oldsize);
2054        if (oldsize < (sz - mopts.malloc_guard) / 2)
2055            wrterror(pool,
2056                "recorded size %zu inconsistent with %zu",
2057                sz - mopts.malloc_guard, oldsize);
2058    }
2059
2060    newptr = omalloc(pool, newsize, 0);
2061    if (newptr == NULL)
2062        goto done;
2063
2064    if (newsize > oldsize) {
2065        memcpy(newptr, p, oldsize);
2066        memset((char *)newptr + oldsize, 0, newsize - oldsize);
2067    } else
2068        memcpy(newptr, p, newsize);
2069
2070    ofree(&pool, p, 1, 0, oldsize);
2071
2072done:
2073    if (*argpool != pool) {
2074        pool->func = saved_function;
2075        *argpool = pool;
2076    }
2077
2078    return newptr;
2079}
2080
2081static void *
2082recallocarray_p(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2083{
2084    size_t oldsize, newsize;
2085    void *newptr;
2086
2087    if (ptr == NULL)
2088        return calloc(newnmemb, size);
2089
2090    if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2091        newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2092        errno = ENOMEM;
2093        return NULL;
2094    }
2095    newsize = newnmemb * size;
2096
2097    if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2098        oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2099        errno = EINVAL;
2100        return NULL;
2101    }
2102    oldsize = oldnmemb * size;
2103
    /*
     * Don't bother reallocating when shrinking only a little; as a
     * consequence a series of small shrink steps never releases
     * memory, which is acceptable.
     */
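    /*
     * Illustrative example (hypothetical sizes): shrinking from
     * 1024 to 1000 bytes just zeroes the trailing 24 bytes and
     * returns the original pointer.
     */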
2108    if (newsize <= oldsize) {
2109        size_t d = oldsize - newsize;
2110
2111        if (d < oldsize / 2 && d < MALLOC_PAGESIZE) {
2112            memset((char *)ptr + newsize, 0, d);
2113            return ptr;
2114        }
2115    }
2116
2117    newptr = malloc(newsize);
2118    if (newptr == NULL)
2119        return NULL;
2120
2121    if (newsize > oldsize) {
2122        memcpy(newptr, ptr, oldsize);
2123        memset((char *)newptr + oldsize, 0, newsize - oldsize);
2124    } else
2125        memcpy(newptr, ptr, newsize);
2126
2127    explicit_bzero(ptr, oldsize);
2128    free(ptr);
2129
2130    return newptr;
2131}
2132
2133void *
2134recallocarray(void *ptr, size_t oldnmemb, size_t newnmemb, size_t size)
2135{
2136    struct dir_info *d;
2137    size_t oldsize = 0, newsize;
2138    void *r;
2139    int saved_errno = errno;
2140
2141    if (!mopts.internal_funcs)
2142        return recallocarray_p(ptr, oldnmemb, newnmemb, size);
2143
2144    PROLOGUE(getpool(), "recallocarray")
2145    SET_CALLER(d, caller(d));
2146
2147    if ((newnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2148        newnmemb > 0 && SIZE_MAX / newnmemb < size) {
2149        d->active--;
2150        _MALLOC_UNLOCK(d->mutex);
2151        if (mopts.malloc_xmalloc)
2152            wrterror(d, "out of memory");
2153        errno = ENOMEM;
2154        return NULL;
2155    }
2156    newsize = newnmemb * size;
2157
2158    if (ptr != NULL) {
2159        if ((oldnmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
2160            oldnmemb > 0 && SIZE_MAX / oldnmemb < size) {
2161            d->active--;
2162            _MALLOC_UNLOCK(d->mutex);
2163            errno = EINVAL;
2164            return NULL;
2165        }
2166        oldsize = oldnmemb * size;
2167    }
2168
2169    r = orecallocarray(&d, ptr, oldsize, newsize);
2170    EPILOGUE()
2171    return r;
2172}
2173DEF_WEAK(recallocarray);
2174
2175static void *
2176mapalign(struct dir_info *d, size_t alignment, size_t sz, int zero_fill)
2177{
2178    char *p, *q;
2179
2180    if (alignment < MALLOC_PAGESIZE || ((alignment - 1) & alignment) != 0)
2181        wrterror(d, "mapalign bad alignment");
2182    if (sz != PAGEROUND(sz))
2183        wrterror(d, "mapalign round");
2184
    /*
     * Allocate sz + alignment bytes of memory, which must include a
     * subrange of sz bytes that is properly aligned.  Unmap the
     * other bytes, and then return that subrange.
     */
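    /*
     * Illustrative example with hypothetical addresses: for
     * alignment 0x4000 and sz 0x2000 we map 0x6000 bytes.  If map()
     * returns p = 0x7003000, then q = 0x7004000; the 0x1000 leading
     * bytes at p and the 0x3000 trailing bytes at q + sz are
     * unmapped and q is returned.
     */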
2189
2190    /* We need sz + alignment to fit into a size_t. */
2191    if (alignment > SIZE_MAX - sz)
2192        return MAP_FAILED;
2193
2194    p = map(d, sz + alignment, zero_fill);
2195    if (p == MAP_FAILED)
2196        return MAP_FAILED;
2197    q = (char *)(((uintptr_t)p + alignment - 1) & ~(alignment - 1));
2198    if (q != p) {
2199        if (munmap(p, q - p))
2200            wrterror(d, "munmap %p", p);
2201    }
2202    if (munmap(q + sz, alignment - (q - p)))
2203        wrterror(d, "munmap %p", q + sz);
2204    STATS_SUB(d->malloc_used, alignment);
2205
2206    return q;
2207}
2208
2209static void *
2210omemalign(struct dir_info *pool, size_t alignment, size_t sz, int zero_fill)
2211{
2212    size_t psz;
2213    void *p, *caller = NULL;
2214
2215    /* If between half a page and a page, avoid MALLOC_MOVE. */
2216    if (sz > MALLOC_MAXCHUNK && sz < MALLOC_PAGESIZE)
2217        sz = MALLOC_PAGESIZE;
2218    if (alignment <= MALLOC_PAGESIZE) {
2219        size_t pof2;
2220        /*
         * max(sz, alignment) rounded up to a power of 2 is enough
         * to ensure the requested alignment.  Large regions are
         * always page aligned.
2224         */
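        /*
         * E.g. sz 24 with alignment 16 becomes a 32-byte chunk
         * allocation, and 32-byte chunks are naturally 32-byte
         * (hence also 16-byte) aligned.
         */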
2225        if (sz < alignment)
2226            sz = alignment;
2227        if (sz < MALLOC_PAGESIZE) {
2228            pof2 = MALLOC_MINSIZE;
2229            while (pof2 < sz)
2230                pof2 <<= 1;
2231        } else
2232            pof2 = sz;
2233        return omalloc(pool, pof2, zero_fill);
2234    }
2235
2236    if (sz >= SIZE_MAX - mopts.malloc_guard - MALLOC_PAGESIZE) {
2237        errno = ENOMEM;
2238        return NULL;
2239    }
2240
2241    if (sz < MALLOC_PAGESIZE)
2242        sz = MALLOC_PAGESIZE;
2243    sz += mopts.malloc_guard;
2244    psz = PAGEROUND(sz);
2245
2246    p = mapalign(pool, alignment, psz, zero_fill);
2247    if (p == MAP_FAILED) {
2248        errno = ENOMEM;
2249        return NULL;
2250    }
2251
2252#ifdef MALLOC_STATS
2253    if (DO_STATS)
2254        caller = pool->caller;
2255#endif
2256    if (insert(pool, p, sz, caller)) {
2257        unmap(pool, p, psz, 0);
2258        errno = ENOMEM;
2259        return NULL;
2260    }
2261
2262    if (mopts.malloc_guard) {
2263        if (mprotect((char *)p + psz - mopts.malloc_guard,
2264            mopts.malloc_guard, PROT_NONE))
2265            wrterror(pool, "mprotect");
2266        STATS_ADD(pool->malloc_guarded, mopts.malloc_guard);
2267    }
2268
2269    if (pool->malloc_junk == 2) {
2270        if (zero_fill)
2271            memset((char *)p + sz - mopts.malloc_guard,
2272                SOME_JUNK, psz - sz);
2273        else
2274            memset(p, SOME_JUNK, psz - mopts.malloc_guard);
2275    } else if (mopts.chunk_canaries)
2276        fill_canary(p, sz - mopts.malloc_guard,
2277            psz - mopts.malloc_guard);
2278
2279    return p;
2280}
2281
2282int
2283posix_memalign(void **memptr, size_t alignment, size_t size)
2284{
2285    struct dir_info *d;
2286    int res, saved_errno = errno;
2287    void *r;
2288
2289    /* Make sure that alignment is a large enough power of 2. */
2290    if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *))
2291        return EINVAL;
2292
2293    d = getpool();
2294    if (d == NULL) {
2295        _malloc_init(0);
2296        d = getpool();
2297    }
2298    _MALLOC_LOCK(d->mutex);
2299    d->func = "posix_memalign";
2300    if (d->active++) {
2301        malloc_recurse(d);
2302        goto err;
2303    }
2304    SET_CALLER(d, caller(d));
2305    r = omemalign(d, alignment, size, 0);
2306    d->active--;
2307    _MALLOC_UNLOCK(d->mutex);
2308    if (r == NULL) {
2309        if (mopts.malloc_xmalloc)
2310            wrterror(d, "out of memory");
2311        goto err;
2312    }
2313    errno = saved_errno;
2314    *memptr = r;
2315    return 0;
2316
2317err:
2318    res = errno;
2319    errno = saved_errno;
2320    return res;
2321}
2322DEF_STRONG(posix_memalign);
2323
2324void *
2325aligned_alloc(size_t alignment, size_t size)
2326{
2327    struct dir_info *d;
2328    int saved_errno = errno;
2329    void *r;
2330
2331    /* Make sure that alignment is a positive power of 2. */
2332    if (((alignment - 1) & alignment) != 0 || alignment == 0) {
2333        errno = EINVAL;
2334        return NULL;
2335    }
    /* Per the C11 spec, size must be a multiple of alignment */
2337    if ((size & (alignment - 1)) != 0) {
2338        errno = EINVAL;
2339        return NULL;
2340    }
2341
2342    PROLOGUE(getpool(), "aligned_alloc")
2343    SET_CALLER(d, caller(d));
2344    r = omemalign(d, alignment, size, 0);
2345    EPILOGUE()
2346    return r;
2347}
2348DEF_STRONG(aligned_alloc);
2349
2350#ifdef MALLOC_STATS
2351
2352static int
2353btcmp(const struct btnode *e1, const struct btnode *e2)
2354{
2355    return memcmp(e1->caller, e2->caller, sizeof(e1->caller));
2356}
2357
2358RBT_GENERATE(btshead, btnode, entry, btcmp);
2359
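/*
 * Intern a backtrace: return the existing node for this set of
 * callers if seen before, otherwise copy it into a node taken from
 * a page-sized pool (mapping a fresh page when the pool runs out)
 * and insert it into the tree.
 */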
static void *
2361store_caller(struct dir_info *d, struct btnode *f)
2362{
2363    struct btnode *p;
2364
2365    if (DO_STATS == 0 || d->btnodes == MAP_FAILED)
2366        return NULL;
2367
2368    p = RBT_FIND(btshead, &d->btraces, f);
2369    if (p != NULL)
2370        return p;
2371    if (d->btnodes == NULL ||
2372        d->btnodesused >= MALLOC_PAGESIZE / sizeof(struct btnode)) {
2373        d->btnodes = map(d, MALLOC_PAGESIZE, 0);
2374        if (d->btnodes == MAP_FAILED)
2375            return NULL;
2376        d->btnodesused = 0;
2377    }
2378    p = &d->btnodes[d->btnodesused++];
2379    memcpy(p->caller, f->caller, sizeof(p->caller[0]) * DO_STATS);
2380    RBT_INSERT(btshead, &d->btraces, p);
2381    return p;
2382}
2383
2384static void fabstorel(const void *, char *, size_t);
2385
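/*
 * Report a write detected in a free chunk: show where that chunk was
 * allocated and, since overruns are a likely culprit, where the
 * chunk immediately preceding it in memory was allocated.
 */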
2386static void
2387print_chunk_details(struct dir_info *pool, void *p, size_t sz, size_t index)
2388{
2389    struct region_info *r;
2390    struct chunk_info *chunkinfo;
    struct btnode *btnode;
2392    uint32_t chunknum;
2393    int frame;
2394    char buf1[128];
2395    char buf2[128];
2396    const char *msg = "";
2397
2398    r = find(pool, p);
2399    chunkinfo = (struct chunk_info *)r->size;
2400    chunknum = find_chunknum(pool, chunkinfo, p, 0);
2401    btnode = (struct btnode *)r->f[chunknum];
2402    frame = DO_STATS - 1;
2403    if (btnode != NULL)
2404        fabstorel(btnode->caller[frame], buf1, sizeof(buf1));
2405    strlcpy(buf2, ". 0x0", sizeof(buf2));
2406    if (chunknum > 0) {
2407        chunknum--;
2408        btnode = (struct btnode *)r->f[chunknum];
2409        if (btnode != NULL)
2410            fabstorel(btnode->caller[frame], buf2, sizeof(buf2));
2411        if (CHUNK_FREE(chunkinfo, chunknum))
2412            msg = " (now free)";
2413    }
2414
2415    wrterror(pool,
2416        "write to free chunk %p[%zu..%zu]@%zu allocated at %s "
2417        "(preceding chunk %p allocated at %s%s)",
2418        p, index * sizeof(uint64_t), (index + 1) * sizeof(uint64_t) - 1,
        sz, buf1, (char *)p - sz, buf2, msg);
2420}
2421
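/*
 * Accumulate formatted output in a static mmap()ed buffer and pass
 * each newline-terminated (or full) buffer to ktrace via utrace(2).
 */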
2422static void
2423ulog(const char *format, ...)
2424{
2425    va_list ap;
    static char *buf;
2427    static size_t filled;
2428    int len;
2429
2430    if (buf == NULL)
2431        buf = MMAP(KTR_USER_MAXLEN, 0);
2432    if (buf == MAP_FAILED)
2433        return;
2434
2435    va_start(ap, format);
2436    len = vsnprintf(buf + filled, KTR_USER_MAXLEN - filled, format, ap);
2437    va_end(ap);
2438    if (len < 0)
2439        return;
2440    if ((size_t)len > KTR_USER_MAXLEN - filled)
2441        len = KTR_USER_MAXLEN - filled;
2442    filled += len;
2443    if (filled > 0) {
2444        if (filled == KTR_USER_MAXLEN || buf[filled - 1] == '\n') {
2445            utrace("malloc", buf, filled);
2446            filled = 0;
2447        }
2448    }
2449}
2450
2451struct malloc_leak {
2452    void *f;
2453    size_t total_size;
2454    int count;
2455};
2456
2457struct leaknode {
2458    RBT_ENTRY(leaknode) entry;
2459    struct malloc_leak d;
2460};
2461
2462static inline int
2463leakcmp(const struct leaknode *e1, const struct leaknode *e2)
2464{
2465    return e1->d.f < e2->d.f ? -1 : e1->d.f > e2->d.f;
2466}
2467
2468RBT_HEAD(leaktree, leaknode);
2469RBT_PROTOTYPE(leaktree, leaknode, entry, leakcmp);
2470RBT_GENERATE(leaktree, leaknode, entry, leakcmp);
2471
2472static void
wrtwarning(const char *func, const char *msg, ...)
2474{
2475    int     saved_errno = errno;
2476    va_list     ap;
2477
2478    dprintf(STDERR_FILENO, "%s(%d) in %s(): ", __progname,
2479        getpid(), func != NULL ? func : "unknown");
2480    va_start(ap, msg);
2481    vdprintf(STDERR_FILENO, msg, ap);
2482    va_end(ap);
2483    dprintf(STDERR_FILENO, "\n");
2484
2485    errno = saved_errno;
2486}
2487
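/*
 * Account cnt allocations of size sz to caller f, aggregating the
 * totals per distinct caller in the leak tree.  Nodes are carved
 * from directly mmap()ed pages rather than obtained via malloc().
 */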
2488static void
2489putleakinfo(struct leaktree *leaks, void *f, size_t sz, int cnt)
2490{
2491    struct leaknode key, *p;
2492    static struct leaknode *page;
2493    static unsigned int used;
2494
2495    if (cnt == 0 || page == MAP_FAILED)
2496        return;
2497
2498    key.d.f = f;
2499    p = RBT_FIND(leaktree, leaks, &key);
2500    if (p == NULL) {
2501        if (page == NULL ||
2502            used >= MALLOC_PAGESIZE / sizeof(struct leaknode)) {
2503            page = MMAP(MALLOC_PAGESIZE, 0);
2504            if (page == MAP_FAILED) {
2505                wrtwarning(__func__, strerror(errno));
2506                return;
2507            }
2508            used = 0;
2509        }
2510        p = &page[used++];
2511        p->d.f = f;
2512        p->d.total_size = sz * cnt;
2513        p->d.count = cnt;
2514        RBT_INSERT(leaktree, leaks, p);
2515    } else {
2516        p->d.total_size += sz * cnt;
2517        p->d.count += cnt;
2518    }
2519}
2520
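/*
 * Translate an absolute caller address into "object offset" form
 * (offset relative to the containing shared object) so that it can
 * be fed to addr2line -e.
 */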
2521static void
2522fabstorel(const void *f, char *buf, size_t size)
2523{
2524    Dl_info info;
2525    const char *object = ".";
2526    const char *caller;
2527
2528    caller = f;
2529    if (caller != NULL && dladdr(f, &info) != 0) {
2530        caller -= (uintptr_t)info.dli_fbase;
2531        object = info.dli_fname;
2532    }
2533    snprintf(buf, size, "%s %p", object, caller);
2534}
2535
2536static void
2537dump_leak(struct leaknode *p)
2538{
2539    int i;
2540    char buf[128];
2541
2542    if (p->d.f == NULL) {
2543        fabstorel(NULL, buf, sizeof(buf));
2544        ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
2545            p->d.f, p->d.total_size, p->d.count,
2546            p->d.total_size / p->d.count, buf);
2547        return;
2548    }
2549
2550    for (i = 0; i < DO_STATS; i++) {
2551        const char *abscaller;
2552
2553        abscaller = ((struct btnode*)p->d.f)->caller[i];
2554        if (abscaller == NULL)
2555            break;
2556        fabstorel(abscaller, buf, sizeof(buf));
2557        if (i == 0)
2558            ulog("%18p %7zu %6u %6zu addr2line -e %s\n",
2559                abscaller, p->d.total_size, p->d.count,
2560                p->d.total_size / p->d.count, buf);
2561        else
2562            ulog("%*p %*s %6s %6s addr2line -e %s\n",
2563                i + 18, abscaller, 7 - i, "-", "-", "-", buf);
2564    }
2565}
2566
2567static void
2568dump_leaks(struct leaktree *leaks)
2569{
2570    struct leaknode *p;
2571
2572    ulog("Leak report:\n");
2573    ulog("                 f     sum      #    avg\n");
2574
2575    RBT_FOREACH(p, leaktree, leaks)
2576        dump_leak(p);
2577}
2578
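/*
 * Report the chunks of one chunk page: attribute the still-allocated
 * chunks to their recorded callers, or, when fromfreelist is set,
 * walk and print the whole free list.
 */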
2579static void
2580dump_chunk(struct leaktree* leaks, struct chunk_info *p, void **f,
2581    int fromfreelist)
2582{
2583    while (p != NULL) {
2584        if (mopts.malloc_verbose)
2585            ulog("chunk %18p %18p %4zu %d/%d\n",
2586                p->page, NULL,
2587                B2SIZE(p->bucket), p->free, p->total);
2588        if (!fromfreelist) {
            size_t i, sz = B2SIZE(p->bucket);
2590            for (i = 0; i < p->total; i++) {
2591                if (!CHUNK_FREE(p, i))
2592                    putleakinfo(leaks, f[i], sz, 1);
2593            }
2594            break;
2595        }
2596        p = LIST_NEXT(p, entries);
2597        if (mopts.malloc_verbose && p != NULL)
2598            ulog("       ->");
2599    }
2600}
2601
2602static void
2603dump_free_chunk_info(struct dir_info *d, struct leaktree *leaks)
2604{
2605    u_int i, j, count;
2606    struct chunk_info *p;
2607
2608    ulog("Free chunk structs:\n");
2609    ulog("Bkt) #CI                     page"
2610        "                  f size free/n\n");
2611    for (i = 0; i <= BUCKETS; i++) {
2612        count = 0;
2613        LIST_FOREACH(p, &d->chunk_info_list[i], entries)
2614            count++;
2615        for (j = 0; j < MALLOC_CHUNK_LISTS; j++) {
2616            p = LIST_FIRST(&d->chunk_dir[i][j]);
2617            if (p == NULL && count == 0)
2618                continue;
2619            if (j == 0)
2620                ulog("%3d) %3d ", i, count);
2621            else
2622                ulog("         ");
2623            if (p != NULL)
2624                dump_chunk(leaks, p, NULL, 1);
2625            else
2626                ulog(".\n");
2627        }
    }
}
2631
2632static void
2633dump_free_page_info(struct dir_info *d)
2634{
2635    struct smallcache *cache;
2636    size_t i, total = 0;
2637
2638    ulog("Cached in small cache:\n");
2639    for (i = 0; i < MAX_SMALLCACHEABLE_SIZE; i++) {
2640        cache = &d->smallcache[i];
2641        if (cache->length != 0)
2642            ulog("%zu(%u): %u = %zu\n", i + 1, cache->max,
2643                cache->length, cache->length * (i + 1));
2644        total += cache->length * (i + 1);
2645    }
2646
2647    ulog("Cached in big cache: %zu/%zu\n", d->bigcache_used,
2648        d->bigcache_size);
2649    for (i = 0; i < d->bigcache_size; i++) {
2650        if (d->bigcache[i].psize != 0)
2651            ulog("%zu: %zu\n", i, d->bigcache[i].psize);
2652        total += d->bigcache[i].psize;
2653    }
2654    ulog("Free pages cached: %zu\n", total);
2655}
2656
2657static void
2658malloc_dump1(int poolno, struct dir_info *d, struct leaktree *leaks)
2659{
2660    size_t i, realsize;
2661
2662    if (mopts.malloc_verbose) {
2663        ulog("Malloc dir of %s pool %d at %p\n", __progname, poolno, d);
2664        ulog("MT=%d J=%d Fl=%#x\n", d->malloc_mt, d->malloc_junk,
2665            d->mmap_flag);
2666        ulog("Region slots free %zu/%zu\n",
2667            d->regions_free, d->regions_total);
2668        ulog("Inserts %zu/%zu\n", d->inserts, d->insert_collisions);
2669        ulog("Deletes %zu/%zu\n", d->deletes, d->delete_moves);
2670        ulog("Cheap reallocs %zu/%zu\n",
2671            d->cheap_reallocs, d->cheap_realloc_tries);
2672        ulog("In use %zu\n", d->malloc_used);
2673        ulog("Guarded %zu\n", d->malloc_guarded);
2674        dump_free_chunk_info(d, leaks);
2675        dump_free_page_info(d);
2676        ulog("Hash table:\n");
2677        ulog("slot)  hash d  type               page                  "
2678            "f size [free/n]\n");
2679    }
2680    for (i = 0; i < d->regions_total; i++) {
2681        if (d->r[i].p != NULL) {
2682            size_t h = hash(d->r[i].p) &
2683                (d->regions_total - 1);
2684            if (mopts.malloc_verbose)
2685                ulog("%4zx) #%4zx %zd ",
2686                    i, h, h - i);
2687            REALSIZE(realsize, &d->r[i]);
2688            if (realsize > MALLOC_MAXCHUNK) {
2689                putleakinfo(leaks, d->r[i].f, realsize, 1);
2690                if (mopts.malloc_verbose)
2691                    ulog("pages %18p %18p %zu\n", d->r[i].p,
2692                        d->r[i].f, realsize);
2693            } else
2694                dump_chunk(leaks,
2695                    (struct chunk_info *)d->r[i].size,
2696                    d->r[i].f, 0);
2697        }
2698    }
2699    if (mopts.malloc_verbose)
2700        ulog("\n");
2701}
2702
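/*
 * Flush the delayed-free ring so those chunks are accounted as free,
 * then dump one pool.
 */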
2703static void
2704malloc_dump0(int poolno, struct dir_info *pool, struct leaktree *leaks)
2705{
2706    int i;
2707    void *p;
2708    struct region_info *r;
2709
2710    if (pool == NULL || pool->r == NULL)
2711        return;
2712    for (i = 0; i < MALLOC_DELAYED_CHUNK_MASK + 1; i++) {
2713        p = pool->delayed_chunks[i];
2714        if (p == NULL)
2715            continue;
2716        r = find(pool, p);
2717        if (r == NULL)
2718            wrterror(pool, "bogus pointer in malloc_dump %p", p);
2719        free_bytes(pool, r, p);
2720        pool->delayed_chunks[i] = NULL;
2721    }
2722    malloc_dump1(poolno, pool, leaks);
2723}
2724
2725void
2726malloc_dump(void)
2727{
2728    u_int i;
2729    int saved_errno = errno;
2730
2731    /* XXX leak when run multiple times */
2732    struct leaktree leaks = RBT_INITIALIZER(&leaks);
2733
2734    for (i = 0; i < mopts.malloc_mutexes; i++)
2735        malloc_dump0(i, mopts.malloc_pool[i], &leaks);
2736
2737    dump_leaks(&leaks);
2738    ulog("\n");
2739    errno = saved_errno;
2740}
2741DEF_WEAK(malloc_dump);
2742
2743static void
2744malloc_exit(void)
2745{
2746    int save_errno = errno;
2747
2748    ulog("******** Start dump %s *******\n", __progname);
2749    ulog("M=%u I=%d F=%d U=%d J=%d R=%d X=%d C=%#x cache=%u "
2750        "G=%zu\n",
2751        mopts.malloc_mutexes,
2752        mopts.internal_funcs, mopts.malloc_freecheck,
2753        mopts.malloc_freeunmap, mopts.def_malloc_junk,
2754        mopts.malloc_realloc, mopts.malloc_xmalloc,
2755        mopts.chunk_canaries, mopts.def_maxcache,
2756        mopts.malloc_guard);
2757
2758    malloc_dump();
2759    ulog("******** End dump %s *******\n", __progname);
2760    errno = save_errno;
2761}
2762
2763#endif /* MALLOC_STATS */
2764