2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
15 /* An incomplete test for the garbage collector. */
16 /* Some more obscure entry points are not tested at all. */
18 # if defined(mips) && defined(SYSTYPE_BSD43)
24 # include <assert.h> /* Not normally used, but handy for debugging. */
26 # include "gc_typed.h"
27 # include "gc_priv.h" /* For output, locking, and some statistics */
28 # include "gcconfig.h"
35 # include "th/PCR_ThCrSec.h"
36 # include "th/PCR_Th.h"
38 # define GC_printf0 printf
40 # define GC_printf1 printf
43 # ifdef SOLARIS_THREADS
48 # if defined(IRIX_THREADS) || defined(LINUX_THREADS)
54 static CRITICAL_SECTION incr_cs;
56 # if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS)
61 long __stack = 200000;
64 # define FAIL (void)abort()
66 /* AT_END may be defined to exercise the interior pointer test */
67 /* if the collector is configured with ALL_INTERIOR_POINTERS. */
68 /* As it stands, this test should succeed with either */
69 /* configuration. In the FIND_LEAK configuration, it should */
70 /* find lots of leaks, since we free almost nothing. */
73 struct SEXPR * sexpr_car;
74 struct SEXPR * sexpr_cdr;
78 typedef struct SEXPR * sexpr;
80 # define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))
83 # define nil (INT_TO_SEXPR(0))
84 # define car(x) ((x) -> sexpr_car)
85 # define cdr(x) ((x) -> sexpr_cdr)
86 # define is_nil(x) ((x) == nil)
89 int extra_count = 0; /* Amount of space wasted in cons node */
91 /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
92 /* to test collector. */
99 register int my_extra = extra_count;
101 r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
103 (void)GC_printf0("Out of memory\n");
107 ((char *)p) < ((char *)r) + my_extra + sizeof(struct SEXPR); p++) {
109 (void)GC_printf1("Found nonzero at 0x%lx - allocator is broken\n",
116 r = (sexpr)((char *)r + (my_extra & ~7));
121 if ( my_extra >= 5000 ) {
124 extra_count = my_extra;
126 GC_END_STUBBORN_CHANGE((char *)r);
130 sexpr small_cons (x, y)
136 r = (sexpr) GC_MALLOC(sizeof(struct SEXPR));
138 (void)GC_printf0("Out of memory\n");
146 sexpr small_cons_uncollectable (x, y)
152 r = (sexpr) GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
153 assert(GC_is_marked(r));
155 (void)GC_printf0("Out of memory\n");
159 r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
163 /* Return reverse(x) concatenated with y */
170 return( reverse1(cdr(x), cons(car(x), y)) );
177 return( reverse1(x, nil) );
186 return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
190 /* To check uncollectable allocation we build lists with disguised cdr */
191 /* pointers, and make sure they don't go away. */
192 sexpr uncollectable_ints(low, up)
198 return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
199 uncollectable_ints(low+1, up)));
203 void check_ints(list, low, up)
207 if ((int)(GC_word)(car(car(list))) != low) {
209 "List reversal produced incorrect list - collector is broken\n");
213 if (cdr(list) != nil) {
214 (void)GC_printf0("List too long - collector is broken\n");
218 check_ints(cdr(list), low+1, up);
222 # define UNCOLLECTABLE_CDR(x) (sexpr)(~(unsigned long)(cdr(x)))
224 void check_uncollectable_ints(list, low, up)
228 assert(GC_is_marked(list));
229 if ((int)(GC_word)(car(car(list))) != low) {
231 "Uncollectable list corrupted - collector is broken\n");
235 if (UNCOLLECTABLE_CDR(list) != nil) {
236 (void)GC_printf0("Uncollectable list too long - collector is broken\n");
240 check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
244 /* Not used, but useful for debugging: */
245 void print_int_list(x)
249 (void)GC_printf0("NIL\n");
251 (void)GC_printf1("(%ld)", (long)(car(car(x))));
252 if (!is_nil(cdr(x))) {
253 (void)GC_printf0(", ");
254 (void)print_int_list(cdr(x));
256 (void)GC_printf0("\n");
261 /* Try to force a to be strangely aligned */
269 * A tiny list reversal test to check thread creation.
273 # ifdef WIN32_THREADS
274 unsigned __stdcall tiny_reverse_test(void * arg)
276 void * tiny_reverse_test(void * arg)
279 check_ints(reverse(reverse(ints(1,10))), 1, 10);
283 # if defined(IRIX_THREADS) || defined(LINUX_THREADS) \
284 || defined(SOLARIS_PTHREADS)
289 if ((code = pthread_create(&t, 0, tiny_reverse_test, 0)) != 0) {
290 (void)GC_printf1("Small thread creation failed %lu\n",
291 (unsigned long)code);
294 if ((code = pthread_join(t, 0)) != 0) {
295 (void)GC_printf1("Small thread join failed %lu\n",
296 (unsigned long)code);
301 # elif defined(WIN32_THREADS)
306 h = (HANDLE)_beginthreadex(NULL, 0, tiny_reverse_test,
308 if (h == (HANDLE)-1) {
309 (void)GC_printf1("Small thread creation failed %lu\n",
310 (unsigned long)GetLastError());
313 if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
314 (void)GC_printf1("Small thread wait failed %lu\n",
315 (unsigned long)GetLastError());
320 /* # elif defined(SOLARIS_THREADS) */
324 # define fork_a_thread()
330 # define fork_a_thread()
335 * Repeatedly reverse lists built out of very different sized cons cells.
336 * Check that we didn't lose anything.
346 # if defined(MSWIN32) || defined(MACOS)
347 /* Win32S only allows 128K stacks */
351 /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
362 d = uncollectable_ints(1, 100);
363 e = uncollectable_ints(1, 1);
364 /* Check that realloc updates object descriptors correctly */
365 f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
366 f = (sexpr *)GC_REALLOC((GC_PTR)f, 6 * sizeof(sexpr));
368 g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
369 g = (sexpr *)GC_REALLOC((GC_PTR)g, 800 * sizeof(sexpr));
371 h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
372 h = (sexpr *)GC_REALLOC((GC_PTR)h, 2000 * sizeof(sexpr));
373 h[1999] = ints(1,19);
374 /* Try to force some collections and reuse of small list elements */
375 for (i = 0; i < 10; i++) {
378 /* Superficially test interior pointer recognition on stack */
379 c = (sexpr)((char *)c + sizeof(char *));
380 d = (sexpr)((char *)d + sizeof(char *));
389 for (i = 0; i < 50; i++) {
391 b = reverse(reverse(b));
395 for (i = 0; i < 60; i++) {
396 if (i % 10 == 0) fork_a_thread();
397 /* This maintains the invariant that a always points to a list of */
398 /* 49 integers. Thus this is thread safe without locks, */
399 /* assuming atomic pointer assignments. */
400 a = reverse(reverse(a));
401 # if !defined(AT_END) && !defined(THREADS)
402 /* This is not thread safe, since realloc explicitly deallocates */
404 a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
406 a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
412 c = (sexpr)((char *)c - sizeof(char *));
413 d = (sexpr)((char *)d - sizeof(char *));
415 check_uncollectable_ints(d, 1, 100);
416 check_ints(f[5], 1,17);
417 check_ints(g[799], 1,18);
418 check_ints(h[1999], 1,19);
426 * The rest of this builds balanced binary trees, checks that they don't
427 * disappear, and tests finalization.
429 typedef struct treenode {
431 struct treenode * lchild;
432 struct treenode * rchild;
435 int finalizable_count = 0;
436 int finalized_count = 0;
437 VOLATILE int dropped_something = 0;
440 void finalizer(void * obj, void * client_data)
442 void finalizer(obj, client_data)
450 PCR_ThCrSec_EnterSys();
452 # ifdef SOLARIS_THREADS
453 static mutex_t incr_lock;
454 mutex_lock(&incr_lock);
456 # if defined(IRIX_THREADS) || defined(LINUX_THREADS)
457 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
458 pthread_mutex_lock(&incr_lock);
460 # ifdef WIN32_THREADS
461 EnterCriticalSection(&incr_cs);
463 if ((int)(GC_word)client_data != t -> level) {
464 (void)GC_printf0("Wrong finalization data - collector is broken\n");
469 PCR_ThCrSec_ExitSys();
471 # ifdef SOLARIS_THREADS
472 mutex_unlock(&incr_lock);
474 # if defined(IRIX_THREADS) || defined(LINUX_THREADS)
475 pthread_mutex_unlock(&incr_lock);
477 # ifdef WIN32_THREADS
478 LeaveCriticalSection(&incr_cs);
484 # define MAX_FINALIZED 8000
487 GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
489 /* Too big for THINK_C. have to allocate it dynamically. */
490 GC_word *live_indicators = 0;
493 int live_indicators_count = 0;
498 tn * result = (tn *)GC_MALLOC(sizeof(tn));
501 /* get around static data limitations. */
502 if (!live_indicators)
504 (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
505 if (!live_indicators) {
506 (void)GC_printf0("Out of memory\n");
510 if (n == 0) return(0);
512 (void)GC_printf0("Out of memory\n");
516 result -> lchild = mktree(n-1);
517 result -> rchild = mktree(n-1);
518 if (counter++ % 17 == 0 && n >= 2) {
519 tn * tmp = result -> lchild -> rchild;
521 result -> lchild -> rchild = result -> rchild -> lchild;
522 result -> rchild -> lchild = tmp;
524 if (counter++ % 119 == 0) {
529 PCR_ThCrSec_EnterSys();
531 # ifdef SOLARIS_THREADS
532 static mutex_t incr_lock;
533 mutex_lock(&incr_lock);
535 # if defined(IRIX_THREADS) || defined(LINUX_THREADS)
536 static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
537 pthread_mutex_lock(&incr_lock);
539 # ifdef WIN32_THREADS
540 EnterCriticalSection(&incr_cs);
542 /* Losing a count here causes erroneous report of failure. */
544 my_index = live_indicators_count++;
546 PCR_ThCrSec_ExitSys();
548 # ifdef SOLARIS_THREADS
549 mutex_unlock(&incr_lock);
551 # if defined(IRIX_THREADS) || defined(LINUX_THREADS)
552 pthread_mutex_unlock(&incr_lock);
554 # ifdef WIN32_THREADS
555 LeaveCriticalSection(&incr_cs);
559 GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
560 (GC_finalization_proc *)0, (GC_PTR *)0);
561 if (my_index >= MAX_FINALIZED) {
562 GC_printf0("live_indicators overflowed\n");
565 live_indicators[my_index] = 13;
566 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
567 (GC_PTR *)(&(live_indicators[my_index])),
568 (GC_PTR)result) != 0) {
569 GC_printf0("GC_general_register_disappearing_link failed\n");
572 if (GC_unregister_disappearing_link(
574 (&(live_indicators[my_index]))) == 0) {
575 GC_printf0("GC_unregister_disappearing_link failed\n");
578 if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
579 (GC_PTR *)(&(live_indicators[my_index])),
580 (GC_PTR)result) != 0) {
581 GC_printf0("GC_general_register_disappearing_link failed 2\n");
592 if (n == 0 && t != 0) {
593 (void)GC_printf0("Clobbered a leaf - collector is broken\n");
597 if (t -> level != n) {
598 (void)GC_printf1("Lost a node at level %lu - collector is broken\n",
602 if (counter++ % 373 == 0) (void) GC_MALLOC(counter%5001);
603 chktree(t -> lchild, n-1);
604 if (counter++ % 73 == 0) (void) GC_MALLOC(counter%373);
605 chktree(t -> rchild, n-1);
608 # if defined(SOLARIS_THREADS) && !defined(_SOLARIS_PTHREADS)
613 # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
614 return(GC_MALLOC(8));
616 void ** my_free_list_ptr;
619 if (thr_getspecific(fl_key, (void **)(&my_free_list_ptr)) != 0) {
620 (void)GC_printf0("thr_getspecific failed\n");
623 if (my_free_list_ptr == 0) {
624 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
625 if (thr_setspecific(fl_key, my_free_list_ptr) != 0) {
626 (void)GC_printf0("thr_setspecific failed\n");
630 my_free_list = *my_free_list_ptr;
631 if (my_free_list == 0) {
632 my_free_list = GC_malloc_many(8);
633 if (my_free_list == 0) {
634 (void)GC_printf0("alloc8bytes out of memory\n");
638 *my_free_list_ptr = GC_NEXT(my_free_list);
639 GC_NEXT(my_free_list) = 0;
640 return(my_free_list);
646 # if defined(_SOLARIS_PTHREADS) || defined(IRIX_THREADS) \
647 || defined(LINUX_THREADS)
648 pthread_key_t fl_key;
653 return(GC_malloc(8));
655 void ** my_free_list_ptr;
658 my_free_list_ptr = (void **)pthread_getspecific(fl_key);
659 if (my_free_list_ptr == 0) {
660 my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
661 if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
662 (void)GC_printf0("pthread_setspecific failed\n");
666 my_free_list = *my_free_list_ptr;
667 if (my_free_list == 0) {
668 my_free_list = GC_malloc_many(8);
669 if (my_free_list == 0) {
670 (void)GC_printf0("alloc8bytes out of memory\n");
674 *my_free_list_ptr = GC_NEXT(my_free_list);
675 GC_NEXT(my_free_list) = 0;
676 return(my_free_list);
681 # define alloc8bytes() GC_MALLOC_ATOMIC(8)
690 for (i = 0; i < n; i += 8) {
691 if (alloc8bytes() == 0) {
692 (void)GC_printf0("Out of memory\n");
698 # if defined(THREADS) && defined(GC_DEBUG)
699 # define TREE_HEIGHT 15
701 # define TREE_HEIGHT 16
708 root = mktree(TREE_HEIGHT);
709 alloc_small(5000000);
710 chktree(root, TREE_HEIGHT);
711 if (finalized_count && ! dropped_something) {
712 (void)GC_printf0("Premature finalization - collector is broken\n");
715 dropped_something = 1;
716 GC_noop(root); /* Root needs to remain live until */
717 /* dropped_something is set. */
718 root = mktree(TREE_HEIGHT);
719 chktree(root, TREE_HEIGHT);
720 for (i = TREE_HEIGHT; i >= 0; i--) {
724 alloc_small(5000000);
727 unsigned n_tests = 0;
729 GC_word bm_huge[10] = {
743 /* A very simple test of explicitly typed allocation */
746 GC_word * old, * new;
749 GC_word bm_large = 0xf7ff7fff;
750 GC_descr d1 = GC_make_descriptor(&bm3, 2);
751 GC_descr d2 = GC_make_descriptor(&bm2, 2);
753 GC_descr dummy = GC_make_descriptor(&bm_large, 32);
755 GC_descr d3 = GC_make_descriptor(&bm_large, 32);
756 GC_descr d4 = GC_make_descriptor(bm_huge, 320);
757 GC_word * x = (GC_word *)GC_malloc_explicitly_typed(2000, d4);
761 for (i = 0; i < 4000; i++) {
762 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
764 new[1] = (GC_word)old;
766 new = (GC_word *) GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
768 new[1] = (GC_word)old;
770 new = (GC_word *) GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
772 new[1] = (GC_word)old;
774 new = (GC_word *) GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
777 new[1] = (GC_word)old;
780 new = (GC_word *) GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
783 new = (GC_word *) GC_calloc_explicitly_typed(1001,
788 new[1] = (GC_word)old;
791 for (i = 0; i < 20000; i++) {
793 (void)GC_printf1("typed alloc failed at %lu\n",
799 new = (GC_word *)(old[1]);
818 void fail_proc1(GC_PTR x)
823 #endif /* __STDC__ */
826 # define TEST_FAIL_COUNT(n) 1
828 # define TEST_FAIL_COUNT(n) (fail_count >= (n))
837 char *y = (char *)(size_t)fail_proc1;
843 "This test program is not designed for leak detection mode\n");
844 (void)GC_printf0("Expect lots of problems.\n");
846 if (GC_size(GC_malloc(7)) != 8
847 || GC_size(GC_malloc(15)) != 16) {
848 (void)GC_printf0("GC_size produced unexpected results\n");
851 if (GC_size(GC_malloc(0)) != 4 && GC_size(GC_malloc(0)) != 8) {
852 (void)GC_printf0("GC_malloc(0) failed\n");
855 if (GC_size(GC_malloc_uncollectable(0)) != 4
856 && GC_size(GC_malloc_uncollectable(0)) != 8) {
857 (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
860 GC_is_valid_displacement_print_proc = fail_proc1;
861 GC_is_visible_print_proc = fail_proc1;
863 if (GC_base(x + 13) != x) {
864 (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
868 if (GC_base(y) != 0) {
869 (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
873 if (GC_same_obj(x+5, x) != x + 5) {
874 (void)GC_printf0("GC_same_obj produced incorrect result\n");
877 if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
878 (void)GC_printf0("GC_is_visible produced incorrect result\n");
881 if (!TEST_FAIL_COUNT(1)) {
882 # if!(defined(RS6000) || defined(POWERPC))
883 /* On RS6000s function pointers point to a descriptor in the */
884 /* data segment, so there should have been no failures. */
885 (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
889 if (GC_is_valid_displacement(y) != y
890 || GC_is_valid_displacement(x) != x
891 || GC_is_valid_displacement(x + 3) != x + 3) {
893 "GC_is_valid_displacement produced incorrect result\n");
896 # ifndef ALL_INTERIOR_POINTERS
897 # if defined(RS6000) || defined(POWERPC)
898 if (!TEST_FAIL_COUNT(1)) {
900 if (!TEST_FAIL_COUNT(2)) {
902 (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
906 /* Test floating point alignment */
907 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
908 *(double *)GC_MALLOC(sizeof(double)) = 1.0;
909 /* Repeated list reversal test. */
912 GC_printf0("-------------Finished reverse_test\n");
916 GC_printf0("-------------Finished typed_test\n");
922 /* GC_printf1("Finished %x\n", pthread_self()); */
925 void check_heap_stats()
927 unsigned long max_heap_sz;
930 int late_finalize_count = 0;
932 if (sizeof(char *) > 4) {
933 max_heap_sz = 15000000;
935 max_heap_sz = 11000000;
943 /* Garbage collect repeatedly so that all inaccessible objects */
944 /* can be finalized. */
945 while (GC_collect_a_little()) { }
946 for (i = 0; i < 16; i++) {
948 late_finalize_count += GC_invoke_finalizers();
950 (void)GC_printf1("Completed %lu tests\n", (unsigned long)n_tests);
951 (void)GC_printf2("Finalized %lu/%lu objects - ",
952 (unsigned long)finalized_count,
953 (unsigned long)finalizable_count);
954 # ifdef FINALIZE_ON_DEMAND
955 if (finalized_count != late_finalize_count) {
956 (void)GC_printf0("Demand finalization error\n");
960 if (finalized_count > finalizable_count
961 || finalized_count < finalizable_count/2) {
962 (void)GC_printf0("finalization is probably broken\n");
965 (void)GC_printf0("finalization is probably ok\n");
968 for (i = 0; i < MAX_FINALIZED; i++) {
969 if (live_indicators[i] != 0) {
973 i = finalizable_count - finalized_count - still_live;
976 ("%lu disappearing links remain and %lu more objects were not finalized\n",
977 (unsigned long) still_live, (unsigned long)i);
979 GC_printf0("\tVery suspicious!\n");
981 GC_printf0("\tSlightly suspicious, but probably OK.\n");
984 (void)GC_printf1("Total number of bytes allocated is %lu\n",
986 WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc));
987 (void)GC_printf1("Final heap size is %lu bytes\n",
988 (unsigned long)GC_get_heap_size());
989 if (WORDS_TO_BYTES(GC_words_allocd + GC_words_allocd_before_gc)
990 < 33500000*n_tests) {
991 (void)GC_printf0("Incorrect execution - missed some allocations\n");
994 if (GC_get_heap_size() > max_heap_sz*n_tests) {
995 (void)GC_printf0("Unexpected heap growth - collector may be broken\n");
998 (void)GC_printf0("Collector appears to work\n");
1002 void SetMinimumStack(long minSize)
1006 if (minSize > LMGetDefltStack())
1008 newApplLimit = (long) GetApplLimit()
1009 - (minSize - LMGetDefltStack());
1010 SetApplLimit((Ptr) newApplLimit);
1015 #define cMinStackSpace (512L * 1024L)
1020 void warn_proc(char *msg, GC_word p)
1022 void warn_proc(msg, p)
1027 GC_printf1(msg, (unsigned long)p);
1032 #if !defined(PCR) && !defined(SOLARIS_THREADS) && !defined(WIN32_THREADS) \
1033 && !defined(IRIX_THREADS) && !defined(LINUX_THREADS) || defined(LINT)
1035 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
1046 /* No good way to determine stack base from library; do it */
1047 /* manually on this platform. */
1048 GC_stackbottom = (GC_PTR)(&dummy);
1051 /* Make sure we have lots and lots of stack space. */
1052 SetMinimumStack(cMinStackSpace);
1053 /* Cheat and let stdio initialize toolbox for us. */
1054 printf("Testing GC Macintosh port.\n");
1056 GC_INIT(); /* Only needed if gc is dynamic library. */
1057 (void) GC_set_warn_proc(warn_proc);
1058 # if defined(MPROTECT_VDB) || defined(PROC_VDB)
1059 GC_enable_incremental();
1060 (void) GC_printf0("Switched to incremental mode\n");
1061 # if defined(MPROTECT_VDB)
1062 (void)GC_printf0("Emulating dirty bits with mprotect/signals\n");
1064 (void)GC_printf0("Reading dirty bits from /proc\n");
1069 (void)fflush(stdout);
1071 /* Entry points we should be testing, but aren't. */
1072 /* Some can be tested by defining GC_DEBUG at the top of this file */
1073 /* This is a bit SunOS4 specific. */
1074 GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
1075 GC_register_disappearing_link,
1076 GC_register_finalizer_ignore_self,
1077 GC_debug_register_displacement,
1078 GC_print_obj, GC_debug_change_stubborn,
1079 GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
1080 GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
1081 GC_init, GC_make_closure, GC_debug_invoke_finalizer,
1082 GC_page_was_ever_dirty, GC_is_fresh,
1083 GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
1084 GC_set_max_heap_size, GC_get_bytes_since_gc,
1085 GC_pre_incr, GC_post_incr);
1088 GC_win32_free_heap();
1094 #ifdef WIN32_THREADS
1096 unsigned __stdcall thr_run_one_test(void *arg)
1104 int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
1112 GC_enable_incremental();
1114 InitializeCriticalSection(&incr_cs);
1115 (void) GC_set_warn_proc(warn_proc);
1116 for (i = 0; i < NTEST; i++) {
1117 h[i] = (HANDLE)_beginthreadex(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
1118 if (h[i] == (HANDLE)-1) {
1119 (void)GC_printf1("Thread creation failed %lu\n", (unsigned long)GetLastError());
1124 for (i = 0; i < NTEST; i++)
1125 if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
1126 (void)GC_printf1("Thread wait failed %lu\n", (unsigned long)GetLastError());
1130 (void)fflush(stdout);
1134 #endif /* WIN32_THREADS */
1145 /* GC_enable_incremental(); */
1146 (void) GC_set_warn_proc(warn_proc);
1147 th1 = PCR_Th_Fork(run_one_test, 0);
1148 th2 = PCR_Th_Fork(run_one_test, 0);
1150 if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1151 != PCR_ERes_okay || code != 0) {
1152 (void)GC_printf0("Thread 1 failed\n");
1154 if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
1155 != PCR_ERes_okay || code != 0) {
1156 (void)GC_printf0("Thread 2 failed\n");
1159 (void)fflush(stdout);
1164 #if defined(SOLARIS_THREADS) || defined(IRIX_THREADS) || defined(LINUX_THREADS)
1165 void * thr_run_one_test(void * arg)
1172 # define GC_free GC_debug_free
1175 #ifdef SOLARIS_THREADS
1183 GC_INIT(); /* Only needed if gc is dynamic library. */
1184 GC_enable_incremental();
1185 (void) GC_set_warn_proc(warn_proc);
1186 if (thr_keycreate(&fl_key, GC_free) != 0) {
1187 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1190 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, 0, &th1)) != 0) {
1191 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1194 if ((code = thr_create(0, 1024*1024, thr_run_one_test, 0, THR_NEW_LWP, &th2)) != 0) {
1195 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1199 if ((code = thr_join(th1, 0, 0)) != 0) {
1200 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1203 if (thr_join(th2, 0, 0) != 0) {
1204 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1208 (void)fflush(stdout);
1211 #else /* pthreads */
1216 pthread_attr_t attr;
1219 # ifdef IRIX_THREADS
1220 /* Force a larger stack to be preallocated */
1221 /* Since the initial stack can't always grow later. */
1222 *((volatile char *)&code - 1024*1024) = 0; /* Require 1 Mb */
1223 # endif /* IRIX_THREADS */
1224 pthread_attr_init(&attr);
1225 # ifdef IRIX_THREADS
1226 pthread_attr_setstacksize(&attr, 1000000);
1229 # ifdef MPROTECT_VDB
1230 GC_enable_incremental();
1231 (void) GC_printf0("Switched to incremental mode\n");
1232 (void) GC_printf0("Emulating dirty bits with mprotect/signals\n");
1234 (void) GC_set_warn_proc(warn_proc);
1235 if (pthread_key_create(&fl_key, 0) != 0) {
1236 (void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
1239 if ((code = pthread_create(&th1, &attr, thr_run_one_test, 0)) != 0) {
1240 (void)GC_printf1("Thread 1 creation failed %lu\n", (unsigned long)code);
1243 if ((code = pthread_create(&th2, &attr, thr_run_one_test, 0)) != 0) {
1244 (void)GC_printf1("Thread 2 creation failed %lu\n", (unsigned long)code);
1248 if ((code = pthread_join(th1, 0)) != 0) {
1249 (void)GC_printf1("Thread 1 failed %lu\n", (unsigned long)code);
1252 if (pthread_join(th2, 0) != 0) {
1253 (void)GC_printf1("Thread 2 failed %lu\n", (unsigned long)code);
1257 (void)fflush(stdout);
1258 pthread_attr_destroy(&attr);
1259 GC_printf1("Completed %d collections\n", GC_gc_no);
1262 #endif /* pthreads */
1263 #endif /* SOLARIS_THREADS || IRIX_THREADS || LINUX_THREADS */