/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include <stdio.h>
#include "gc_priv.h"
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a	*/
        /* blacklisted block.  Must be >= HBLKSIZE.		*/

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.	*/
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a	*/
        /* single free list.						*/
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single	*/
        /* bin.								*/

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                + UNIQUE_THRESHOLD;
}
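
/* For example, with the default thresholds above: a request for 40	*/
/* blocks falls in the compressed range and maps to index		*/
/* (40 - 32)/8 + 32 == 33, while any request of 256 or more blocks	*/
/* shares the single list at index N_HBLK_FLS == (256-32)/8 + 32 == 60. */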
# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
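
/* Under USE_MUNMAP, the pages of a long-unused free block may be	*/
/* returned to the OS (see GC_unmap_old below); WAS_UNMAPPED in	*/
/* hb_flags records that state, and IS_MAPPED must be consulted	*/
/* before touching the block's contents.				*/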
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    hdr * hhdr;
    word sz, total_free = 0;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n", (unsigned long)i);
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf0("partially black listed\n");
        } else {
            GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    }
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.				  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections.	*/
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and	*/
/* kind of objects.							*/
/* Return FALSE on failure.						*/
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;	/* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
        int index;
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        } else {
            index = n;
        }
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        PHDR(hhdr) -> hb_next = hhdr -> hb_next;
    }
    if (0 != hhdr -> hb_next) {
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        NHDR(hhdr) -> hb_prev = hhdr -> hb_prev;
    }
}
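
/* Note the asymmetry above: only the head block of a list has		*/
/* hb_prev == 0, so that case must patch GC_hblkfreelist[index]	*/
/* itself; interior blocks just splice their neighbors together.	*/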
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr = HDR(p);

    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr && HBLK_IS_FREE(phdr)) return p;
    p = GC_prev_block(h - 1);
    if (0 != p) {
        phdr = HDR(p);
        if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
            return p;
        }
    }
    return 0;
}
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) HDR(second) -> hb_prev = h;
    GC_invalidate_map(hhdr);
}
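
/* The GC_invalidate_map call is what effectively marks the block as	*/
/* free: it points hb_map at GC_invalid_map, which is exactly the	*/
/* state the HBLK_IS_FREE macro above tests for.			*/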
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only	*/
/* way blocks are ever unmapped.					*/
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if (last_rec > GC_gc_no
            || (last_rec < threshold && threshold < GC_gc_no)
                                        /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
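
/* Example of the age test: with GC_gc_no == 100, threshold is 94, so	*/
/* only blocks whose hb_last_reclaimed is below 94 (i.e. untouched for	*/
/* more than UNMAP_THRESHOLD collections) are unmapped.  The extra	*/
/* comparisons guard against the unsigned short counters wrapping.	*/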
/* Merge all unmapped blocks that are adjacent to other free		*/
/* blocks.  This may involve remapping, since all blocks are either	*/
/* fully mapped or fully unmapped.					*/
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        size = hhdr -> hb_sz;
        next = (struct hblk *)((word)h + size);
        nexthdr = HDR(next);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}
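
/* The three cases above: if exactly one of the two neighbors is	*/
/* mapped, the smaller one is converted to match the larger (fewer	*/
/* bytes to map or unmap); if both are unmapped, only the page gap	*/
/* between them needs unmapping.  Either way the headers are then	*/
/* merged and the combined block is reinserted on the proper list.	*/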

#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    if (!GC_install_header(rest)) return(0);
    rest_hdr = HDR(rest);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
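
/* E.g., asked for bytes == 2*HBLKSIZE out of a 5*HBLKSIZE free block	*/
/* h, GC_get_first_part unlinks h, carves off rest = h + 2 HBLKs with	*/
/* rest_hdr -> hb_sz == 3*HBLKSIZE, and hands back h; the caller then	*/
/* installs a real header on h via setup_header().			*/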
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;	/* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;	/* Don't fail test for consecutive	*/
                                /* free blocks in GC_add_to_fl.		*/
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}
struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 *   for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 *       Caller is responsible for building an object freelist in block.
 *
 * We clear the block if it is destined for large objects, and if
 * kind requires that newly allocated objects be cleared.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
{
    int start_list = GC_hblk_fl_from_blocks(OBJ_SZ_TO_BLOCKS(sz));
    int i;

    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) return result;
    }
    return 0;
}
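
/* E.g., a single-block request (sz small enough that			*/
/* OBJ_SZ_TO_BLOCKS(sz) == 1) starts at free list 1 and, failing	*/
/* there, tries ever larger size classes up to N_HBLK_FLS; any larger	*/
/* block found is split, with the remainder returned to its own list.	*/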
/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;		/* Header corr. to hbp		*/
    register struct hblk *thishbp;
    register hdr * thishdr;		/* Header corr. to thishbp	*/
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;	/* bytes available in this block	*/

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        hhdr = HDR(hbp);
        for(; 0 != hbp; hbp = hhdr -> hb_next, hhdr = HDR(hbp)) {
            size_avail = hhdr -> hb_sz;
            if (size_avail < size_needed) continue;
#           ifdef PRESERVE_LAST
                if (size_avail != size_needed
                    && !GC_incremental && GC_should_collect()) {
                    continue;
                }
#           endif
            /* If the next heap block is obviously better, go on.	 */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.					 */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                thishdr = HDR(thishbp);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp && GC_install_header(thishbp)) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, size_avail);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      thishdr = HDR(thishbp);
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may	*/
                      /* be on the wrong free list.			*/
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                WARN("Needed to allocate blacklisted block at 0x%lx\n",
                     (word)hbp);
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need	*/
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointer-free	*/
                  /* blocks are unpopular.				*/
                  /* A dropped block will be reconsidered at next GC.	*/
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to	*/
                    /* maximize the chance that we will recover some	*/
                    /* later.						*/
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += total_size;
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || GC_install_header(h)) {
                          hhdr = HDR(h);
                          (void) setup_header(
                                  hhdr,
                                  BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
                                  PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h + HDR_BYTES, HBLKSIZE - HDR_BYTES);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, size_avail);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n	*/
                /* is important.					*/
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;
    /* Notify virtual dirty bit implementation that we are about to write. */
        GC_write_hint(hbp);

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Clear block if necessary */
        if (GC_debugging_started
            || (sz > MAXOBJSZ && GC_obj_kinds[kind].ok_init)) {
            BZERO(hbp + HDR_BYTES, size_needed - HDR_BYTES);
        }

    /* We just successfully allocated a block.  Restart count of	*/
    /* consecutive failures.						*/
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    hhdr = HDR(hbp);
    size = hhdr -> hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr -> hb_sz = size;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    nexthdr = HDR(next);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
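
/* Taken together with GC_free_block_ending_at, this gives full	*/
/* two-sided coalescing: freeing the middle one of three adjacent	*/
/* blocks, where both neighbors are free and mapped, collapses all	*/
/* three into one free block on a single (larger) free list.		*/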