/*	$NetBSD: malloc.c,v 1.1 2003/04/21 01:23:06 christos Exp $	*/

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)malloc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: malloc.c,v 1.1 2003/04/21 01:23:06 christos Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */
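
/*
 * Worked example of the sizing scheme (a sketch, assuming a build where
 * sizeof(union overhead) is 4 and RCHECK is off): a request for 100 bytes
 * needs 100 + 4 = 104 bytes including the header, so it is served from the
 * 128-byte bucket, leaving 124 usable bytes.  Requests of up to 124 bytes
 * land in that same bucket; a 125-byte request moves up to the 256-byte
 * bucket.
 */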

#include <sys/types.h>
#if defined(DEBUG) || defined(RCHECK)
#include <sys/uio.h>
#endif
#if defined(RCHECK) || defined(MSTATS)
#include <stdio.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <threadlib.h>


/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_long	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};

#define	MAGIC		0xef		/* magic # on accounting info */
#ifdef RCHECK
#define	RMAGIC		0x5555		/* magic # on range info */
#endif

#ifdef RCHECK
#define	RSLOP		sizeof (u_short)
#else
#define	RSLOP		0
#endif
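
/*
 * Layout sketch (illustrative; exact widths depend on the build).  An
 * in-use block handed back to the caller looks like
 *
 *	+----------+----------+-----+----------....----------+
 *	| ov_magic | ov_index | pad |       user data        |
 *	|  (MAGIC) | (bucket) |     |                        |
 *	+----------+----------+-----+----------....----------+
 *	^ union overhead            ^ pointer returned by malloc()
 *
 * while a free block reuses the same first word as ov_next, the link to
 * the next free block of its bucket.  With RCHECK defined, ov_rmagic and
 * ov_size are added to the header and a trailing RMAGIC word follows the
 * user data.
 */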

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	long pagesz;			/* page size */
static	int pagebucket;			/* page size bucket */
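
/*
 * For reference: bucket 0 holds 8-byte blocks, bucket 1 holds 16-byte
 * blocks, and in general bucket i holds blocks of 2^(i+3) bytes.  On a
 * system with 4096-byte pages the first-time setup in malloc() below
 * therefore leaves pagebucket == 9, since 2^(9+3) == 4096.
 */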

#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

static	mutex_t malloc_mutex = MUTEX_INITIALIZER;

static void morecore __P((int));
static int findbucket __P((union overhead *, int));
#ifdef MSTATS
void mstats __P((const char *));
#endif

#if defined(DEBUG) || defined(RCHECK)
#define	ASSERT(p)   if (!(p)) botch(__STRING(p))

static void botch __P((const char *));

/*
 * NOTE: since this may be called while malloc_mutex is locked, stdio must not
 * be used in this function.
 */
static void
botch(s)
	const char *s;
{
	struct iovec iov[3];

	iov[0].iov_base	= "\nassertion botched: ";
	iov[0].iov_len	= 20;
	iov[1].iov_base	= (void *)s;
	iov[1].iov_len	= strlen(s);
	iov[2].iov_base	= "\n";
	iov[2].iov_len	= 1;

	/*
	 * This place deserves a word of warning: a cancellation point will
	 * occur when executing writev(), and we might be still owning
	 * malloc_mutex.  At this point we need to disable cancellation
	 * until `after' abort() because i) establishing a cancellation handler
	 * might, depending on the implementation, result in another malloc()
	 * to be executed, and ii) it is really not desirable to let execution
	 * continue.  `Fix me.'
	 *
	 * Note that holding mutex_lock during abort() is safe.
	 */

	(void)writev(STDERR_FILENO, iov, 3);
	abort();
}
#else
#define	ASSERT(p)
#endif

void *
malloc(nbytes)
	size_t nbytes;
{
	union overhead *op;
	int bucket;
	long n;
	unsigned amt;

	mutex_lock(&malloc_mutex);

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = getpagesize();
		ASSERT(pagesz > 0);
		op = (union overhead *)(void *)sbrk(0);
		n = n - sizeof (*op) - ((long)op & (n - 1));
		if (n < 0)
			n += pagesz;
		if (n) {
			if (sbrk((int)n) == (void *)-1) {
				mutex_unlock(&malloc_mutex);
				return (NULL);
			}
		}
		bucket = 0;
		amt = 8;
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
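	/*
	 * Illustration of the alignment step above (a sketch, assuming
	 * 4096-byte pages and a 4-byte overhead union): if sbrk(0) returns
	 * 0x2004, then n = 4096 - 4 - (0x2004 & 0xfff) = 4088 and the
	 * sbrk(n) call moves the break to 0x2ffc.  A page-sized block
	 * obtained later therefore has its overhead header just below a
	 * page boundary, and the data area handed to the caller starts
	 * page aligned.
	 */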
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
#ifndef RCHECK
		amt = 8;	/* size of first bucket */
		bucket = 0;
#else
		amt = 16;	/* size of first bucket */
		bucket = 1;
#endif
		n = -((long)sizeof (*op) + RSLOP);
	} else {
		amt = (unsigned)pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0) {
			/* size overflowed; don't leave the mutex held */
			mutex_unlock(&malloc_mutex);
			return (NULL);
		}
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL) {
			mutex_unlock(&malloc_mutex);
			return (NULL);
		}
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
	mutex_unlock(&malloc_mutex);
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	return ((void *)(op + 1));
}

/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(bucket)
	int bucket;
{
	union overhead *op;
	long sz;		/* size of desired block */
	long amt;		/* amount to allocate */
	long nblks;		/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef DEBUG
	ASSERT(sz > 0);
#else
	if (sz <= 0)
		return;
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	op = (union overhead *)(void *)sbrk((int)amt);
	/* no more room! */
	if ((long)op == -1)
		return;
	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next =
		    (union overhead *)(void *)((caddr_t)(void *)op+(size_t)sz);
		op = op->ov_next;
	}
}
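
/*
 * For example (illustrative numbers, assuming 4096-byte pages): morecore(5)
 * wants 256-byte blocks, so it sbrk()s one page and chops it into
 * nblks = 4096 / 256 = 16 blocks chained onto nextf[5]; the chain ends at
 * the zero-filled memory sbrk() returns, so the last link reads as NULL.
 * For buckets of a page or more, a single block of sz + pagesz bytes is
 * obtained instead.
 */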

void
free(cp)
	void *cp;
{
	long size;
	union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
#ifdef DEBUG
	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
#else
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
#endif
#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
	size = op->ov_index;
	ASSERT(size < NBUCKETS);
	mutex_lock(&malloc_mutex);
	op->ov_next = nextf[(unsigned int)size];/* also clobbers ov_magic */
	nextf[(unsigned int)size] = op;
#ifdef MSTATS
	nmalloc[(size_t)size]--;
#endif
	mutex_unlock(&malloc_mutex);
}
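
/*
 * Note: free() only pushes the block back onto its bucket's free list;
 * memory is never returned to the system, so the sbrk()-managed break only
 * grows.  As an illustrative sequence (assuming no other allocations
 * intervene and the 4-byte header of the example above):
 *
 *	p = malloc(100);	-- served from the 128-byte bucket
 *	free(p);		-- block goes back on nextf[4]
 *	q = malloc(90);		-- pops the same block, so q == p
 */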

/*
 * When a program attempts "storage compaction" as mentioned in the
 * old malloc man page, it realloc's an already freed block.  Usually
 * this is the last block it freed; occasionally it might be farther
 * back.  We have to search all the free lists for the block in order
 * to determine its bucket: 1st we make one pass thru the lists
 * checking only the first block in each; if that fails we search
 * ``__realloc_srchlen'' blocks in each list for a match (the variable
 * is extern so the caller can modify it).  If that fails we just copy
 * however many bytes was given to realloc() and hope it's not huge.
 */
int __realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */

void *
realloc(cp, nbytes)
	void *cp;
	size_t nbytes;
{
	u_long onb;
	long i;
	union overhead *op;
	char *res;
	int was_alloced = 0;

	if (cp == NULL)
		return (malloc(nbytes));
	if (nbytes == 0) {
		free (cp);
		return (NULL);
	}
	op = (union overhead *)(void *)((caddr_t)cp - sizeof (union overhead));
	mutex_lock(&malloc_mutex);
	if (op->ov_magic == MAGIC) {
		was_alloced++;
		i = op->ov_index;
	} else {
		/*
		 * Already free, doing "compaction".
		 *
		 * Search for the old block of memory on the
		 * free list.  First, check the most common
		 * case (last element free'd), then (this failing)
		 * the last ``__realloc_srchlen'' items free'd.
		 * If all lookups fail, then assume the size of
		 * the memory block being realloc'd is the
		 * largest possible (so that all "nbytes" of new
		 * memory are copied into).  Note that this could cause
		 * a memory fault if the old area was tiny, and the moon
		 * is gibbous.  However, that is very unlikely.
		 */
		if ((i = findbucket(op, 1)) < 0 &&
		    (i = findbucket(op, __realloc_srchlen)) < 0)
			i = NBUCKETS;
	}
	onb = (u_long)1 << (u_long)(i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
	/* avoid the copy if same size block */
	if (was_alloced) {
		if (i) {
			i = (long)1 << (long)(i + 2);
			if (i < pagesz)
				i -= sizeof (*op) + RSLOP;
			else
				i += pagesz - sizeof (*op) - RSLOP;
		}
		if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
			mutex_unlock(&malloc_mutex);
			return (cp);

		}
#ifndef _REENT
		else
			free(cp);
#endif
	}
	mutex_unlock(&malloc_mutex);
	if ((res = malloc(nbytes)) == NULL) {
#ifdef _REENT
		free(cp);
#endif
		return (NULL);
	}
#ifndef _REENT
	if (cp != res)		/* common optimization if "compacting" */
		(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
#else
	(void)memmove(res, cp, (size_t)((nbytes < onb) ? nbytes : onb));
	free(cp);
#endif
	return (res);
}
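
/*
 * Worked example (a sketch, again assuming a 4-byte overhead union, no
 * RCHECK and 4096-byte pages): p = malloc(100) comes out of the 128-byte
 * bucket, so onb = 128 - 4 = 124 while the next smaller bucket offers only
 * 64 - 4 = 60 usable bytes.  realloc(p, 120) therefore returns p unchanged,
 * whereas realloc(p, 200) allocates from the 256-byte bucket and copies the
 * old data across.
 */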

/*
 * Search ``srchlen'' elements of each free list for a block whose
 * header starts at ``freep''.  If srchlen is -1 search the whole list.
 * Return bucket number, or -1 if not found.
 */
static int
findbucket(freep, srchlen)
	union overhead *freep;
	int srchlen;
{
	union overhead *p;
	int i, j;

	for (i = 0; i < NBUCKETS; i++) {
		j = 0;
		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
			if (p == freep)
				return (i);
			j++;
		}
	}
	return (-1);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
mstats(s)
	const char *s;
{
	int i, j;
	union overhead *p;
	int totfree = 0,
	totused = 0;

	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
	for (i = 0; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		fprintf(stderr, " %d", j);
		totfree += j * (1 << (i + 3));
	}
	fprintf(stderr, "\nused:\t");
	for (i = 0; i < NBUCKETS; i++) {
		fprintf(stderr, " %d", nmalloc[i]);
		totused += nmalloc[i] * (1 << (i + 3));
	}
	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
496 }
497 #endif