pike.git / src / gc.c

pike.git/src/gc.c:1:
  /*
  || This file is part of Pike. For copyright information see COPYRIGHT.
  || Pike is distributed under GPL, LGPL and MPL. See the file COPYING
  || for more information.
- || $Id: gc.c,v 1.329 2008/10/11 18:44:11 grubba Exp $
+ || $Id: gc.c,v 1.330 2008/10/12 21:49:56 mast Exp $
  */

  #include "global.h"

  struct callback *gc_evaluator_callback=0;

  #include "array.h"
  #include "multiset.h"
  #include "mapping.h"
  #include "object.h"

pike.git/src/gc.c:4206:
   * First the starting point things are labelled internal and put into
   * the work queue (mc_work_queue).
   *
   * mc_pass is set to MC_PASS_LOOKAHEAD:
   *
   * We do a breadth-first recursion through the things in the work
   * queue until the lookahead count reaches zero, always starting with
   * the things with the highest count.
   *
   * Every time we visit something we calculate its lookahead count as
-  * either max (if it's found to be internal or a candidate), the same
-  * as the source thing (if the followed ref is REF_TYPE_INTERNAL), or
-  * the next lower count (otherwise). If the count is above zero and
-  * the thing is either new or its old count was lower, it's added to
-  * the work list.
+  * either max (if it's found to be referenced from an internal or
+  * candidate thing), the same as the source thing (if the followed ref
+  * is REF_TYPE_INTERNAL), or the next lower count (otherwise). If the
+  * count is above zero and the thing is either new or its old count
+  * was lower, it's added to the work list.
   *
   * mc_work_queue is a priority queue which always has the thing with
   * the highest lookahead count first, thereby ensuring breadth-first
   * recursion also when things have their count raised.
   *
   * int_refs and la_refs are updated when things are visited. They
   * become internal if int_refs add up to the refcount. Otherwise they
   * are put in the incomplete or complete sets as appropriate.
   *
   * mc_pass is set to MC_PASS_MARK_EXTERNAL:
   *
   * At this point the set of lookahead things is complete (as far as we
   * are concerned), and it's divided into complete and incomplete
   * lookahead things. All references in the incomplete list are
   * followed to build up the set of indirectly incomplete things. The
   * incomplete and indirectly incomplete things are referenced
   * externally and should not be memory counted.
   *
   * If there's anything left in the complete list then it's internal
-  * cyclic stuff. In that case we put those things into the work list
-  * at max lookahead count, move the indirectly incomplete list back to
-  * complete and repeat MC_PASS_LOOKAHEAD. Otherwise we're done.
+  * cyclic stuff. In that case we put those things into the work list,
+  * move the indirectly incomplete list back to complete and repeat
+  * MC_PASS_LOOKAHEAD. Otherwise we're done.
   */

  /* #define MEMORY_COUNT_DEBUG */

  #define MC_WQ_START_SIZE 1024

  PMOD_EXPORT int mc_pass;
  PMOD_EXPORT size_t mc_counted_bytes;

  static int mc_lookahead, mc_block_pike_cycle_depth;
- static unsigned mc_count_revisits;
+
  static TYPE_FIELD mc_block_lookahead;
  static TYPE_FIELD mc_block_lookahead_default = BIT_PROGRAM|BIT_STRING|BIT_TYPE;
  /* Strings are blocked because they don't contain refs. Types are
   * blocked because they are acyclic and don't contain refs to anything
-  * else. */
+  * but strings and other types. */

-
+ static int mc_enqueued_noninternal;
+ /* Set whenever something is enqueued in MC_PASS_LOOKAHEAD that isn't
+  * internal already. This is used to detect whether another
+  * MC_PASS_MARK_EXTERNAL is necessary. */
+
  static unsigned mc_ext_toggle_bias = 0;

  #define MC_PASS_LOOKAHEAD 1
  #define MC_PASS_MARK_EXTERNAL 2

- /* Set when the refs emanating from a thing have been counted. */
- #define MC_FLAG_REFCOUNTED 0x01
-
+
  /* Set when a thing has become internal. */
- #define MC_FLAG_INTERNAL 0x02
+ #define MC_FLAG_INTERNAL 0x01

-
+ /* Set when an internal thing has been visited, i.e. after its refs
+  * have been gone through for the first time. This implies that the
+  * thing has been memory counted, and taken off mc_incomplete or
+  * mc_complete if it was there. */
+ #define MC_FLAG_INT_VISITED 0x02
+
+ /* Set when a non-internal thing has been visited. If
+  * MC_FLAG_INT_VISITED isn't set then the thing is on one of
+  * mc_incomplete, mc_complete, or (in MC_PASS_MARK_EXTERNAL)
+  * mc_indirect. */
+ #define MC_FLAG_LA_VISITED 0x04
+
  /* Set when a thing has become a candidate (i.e. complete and
   * referenced directly from an internal or candidate thing). This
   * flag is meaningless when MC_FLAG_INTERNAL is set. */
- #define MC_FLAG_CANDIDATE 0x04
+ #define MC_FLAG_CANDIDATE 0x08

- /* Set when a thing is visited directly from a candidate thing. */
- #define MC_FLAG_CANDIDATE_REF 0x08
+ /* Set when a thing is visited directly from an internal or candidate
+  * thing. */
+ #define MC_FLAG_CANDIDATE_REF 0x10

- /* Set when a thing has been called with VISIT_COUNT_BYTES (after the
-  * visit function has returned). */
- #define MC_FLAG_MEMCOUNTED 0x10
+ /* The lookahead count should not change. Used when it has been lowered
+  * from pike_cycle_depth. */
+ #define MC_FLAG_LA_COUNT_FIXED 0x20

  /* A toggle flag to mark external (i.e. incomplete and indirectly
   * incomplete) things in MC_PASS_MARK_EXTERNAL so that we don't
   * recurse them repeatedly. If mc_ext_toggle_bias is zero then it's
   * external if this is set. If mc_ext_toggle_bias is one then it's
   * external if this is cleared. mc_ext_toggle_bias toggles every time
   * we leave MC_PASS_MARK_EXTERNAL, thus we avoid the work of going
   * through the externals to clear the flag for the next round. */
- #define MC_FLAG_EXT_TOGGLE 0x20
+ #define MC_FLAG_EXT_TOGGLE 0x40

  /* The value of IS_EXTERNAL is meaningless when MC_FLAG_INTERNAL is set. */
  #define IS_EXTERNAL(M) \
    (((M)->flags ^ mc_ext_toggle_bias) & MC_FLAG_EXT_TOGGLE)

  #define INIT_CLEARED_EXTERNAL(M) do { \
    struct mc_marker *_m = (M); \
    if (mc_ext_toggle_bias) _m->flags |= MC_FLAG_EXT_TOGGLE; \
  } while (0)
  #define FLAG_EXTERNAL(M) do { \

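A minimal standalone sketch of the lookahead-count rule described in the block comment above, with hypothetical names (la_propagate, LA_MAX and REF_INTERNAL stand in for the configured maximum lookahead and REF_TYPE_INTERNAL; this is not the gc.c code itself): a target referenced from an internal or candidate thing gets the maximum count, a ref of "internal" type passes the source's count through unchanged, and any other ref decays the count by one, never below zero.

  #include <assert.h>

  #define LA_MAX       100  /* stand-in for the maximum lookahead count */
  #define REF_INTERNAL 0x1  /* stand-in for REF_TYPE_INTERNAL */

  /* Hypothetical helper: the count a referenced thing should get, given
   * the source thing's count and the kind of reference followed. */
  static int la_propagate (int source_count, int ref_type,
                           int from_internal_or_candidate)
  {
    if (from_internal_or_candidate)
      return LA_MAX;                      /* restart the lookahead */
    if (ref_type & REF_INTERNAL)
      return source_count;                /* "internal" refs keep the count */
    return source_count > 0 ? source_count - 1 : 0; /* otherwise decay by one */
  }

  int main (void)
  {
    assert (la_propagate (3, 0, 1) == LA_MAX);
    assert (la_propagate (3, REF_INTERNAL, 0) == 3);
    assert (la_propagate (3, 0, 0) == 2);
    assert (la_propagate (0, 0, 0) == 0);  /* count never goes negative */
    return 0;
  }
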
pike.git/src/gc.c:4342:
    assert (thing);
    assert (visit_fn);
    m->thing = thing;
    m->visit_fn = visit_fn;
    m->extra = extra;
    m->int_refs = m->la_refs = m->flags = 0;
    INIT_CLEARED_EXTERNAL (m);
    m->queuepos = MAX_UINT32;
  #ifdef PIKE_DEBUG
    m->dl_prev = m->dl_next = (void *) (ptrdiff_t) -1;
-   m->la_count = 0;
+   m->la_count = ((unsigned INT16) -1) >> 1;
  #endif
    return m;
  }

  #if defined (PIKE_DEBUG) || defined (MEMORY_COUNT_DEBUG)
  static void describe_mc_marker (struct mc_marker *m)
  {
    fprintf (stderr, "%s %p: refs %d, int %d, la %d, cnt %d",
             get_name_of_type (type_from_visit_fn (m->visit_fn)),
             m->thing, *(INT32 *) m->thing, m->int_refs, m->la_refs, m->la_count);
    if (m->queuepos != MAX_UINT32) fprintf (stderr, ", wq %u", m->queuepos);
-   if (m->flags & MC_FLAG_REFCOUNTED) fputs (", RC", stderr);
-   if (m->flags & MC_FLAG_INTERNAL) fputs (", INT", stderr);
+   if (m->flags & MC_FLAG_INTERNAL) fputs (", I", stderr);
+   if (m->flags & MC_FLAG_INT_VISITED) fputs (", IV", stderr);
+   if (m->flags & MC_FLAG_LA_VISITED) fputs (", LAV", stderr);
    if (m->flags & MC_FLAG_CANDIDATE) fputs (", C", stderr);
    if (m->flags & MC_FLAG_CANDIDATE_REF) fputs (", CR", stderr);
-
+   if (m->flags & MC_FLAG_LA_COUNT_FIXED) fputs (", CF", stderr);
    if (IS_EXTERNAL (m))
-     fputs (m->flags & MC_FLAG_INTERNAL ? ", (EXT)" : ", EXT", stderr);
-   if (m->flags & MC_FLAG_MEMCOUNTED) fputs (", MC", stderr);
+     fputs (m->flags & MC_FLAG_INTERNAL ? ", (E)" : ", E", stderr);
  }
  #endif

- static struct mc_marker mc_incomplete = {
+
  /* Sentinel for the incomplete lookaheads list. */
-
+ static struct mc_marker mc_incomplete = {
    (void *) (ptrdiff_t) -1,
    &mc_incomplete, &mc_incomplete,
    (void *) (ptrdiff_t) -1, (visit_thing_fn *) (ptrdiff_t) -1,
    (void *) (ptrdiff_t) -1,
    -1, -1, MAX_UINT32, 0, (unsigned INT16) -1
  };

-
+ /* Sentinel for the complete lookaheads list. The reason all complete
+  * things are tracked and not only the candidates is that elements
+  * then can be easily moved to mc_indirect (and back) without special
+  * cases when noncandidate complete things become indirectly
+  * incomplete. */
  static struct mc_marker mc_complete = {
-   /* Sentinel for the complete lookaheads list. */
+
    (void *) (ptrdiff_t) -1,
    &mc_complete, &mc_complete,
    (void *) (ptrdiff_t) -1, (visit_thing_fn *) (ptrdiff_t) -1,
    (void *) (ptrdiff_t) -1,
    -1, -1, MAX_UINT32, 0, (unsigned INT16) -1
  };

- static struct mc_marker mc_indirect = {
+
  /* Sentinel for the indirectly incomplete lookaheads list. */
-
+ static struct mc_marker mc_indirect = {
    (void *) (ptrdiff_t) -1,
    &mc_indirect, &mc_indirect,
    (void *) (ptrdiff_t) -1, (visit_thing_fn *) (ptrdiff_t) -1,
    (void *) (ptrdiff_t) -1,
    -1, -1, MAX_UINT32, 0, (unsigned INT16) -1
  };

  #define DL_IS_EMPTY(LIST) (LIST.dl_next == &LIST)

  #define DL_ADD_LAST(LIST, M) do { \

pike.git/src/gc.c:4417:
    struct mc_marker *_list_prev = _m->dl_prev; \
    struct mc_marker *_list_next = _m->dl_next; \
    assert (_m->dl_prev != (void *) (ptrdiff_t) -1); \
    assert (_m->dl_next != (void *) (ptrdiff_t) -1); \
    _list_prev->dl_next = _list_next; \
    _list_next->dl_prev = _list_prev; \
    DO_IF_DEBUG (_m->dl_prev = _m->dl_next = (void *) (ptrdiff_t) -1); \
  } while (0)

  #define DL_MOVE(FROM_LIST, TO_LIST) do { \
-   assert (TO_LIST.dl_prev == &TO_LIST); \
-   assert (TO_LIST.dl_next == &TO_LIST); \
+
    if (FROM_LIST.dl_next != &FROM_LIST) { \
-
+     struct mc_marker *to_list_last = TO_LIST.dl_prev; \
      TO_LIST.dl_prev = FROM_LIST.dl_prev; \
-     TO_LIST.dl_next = FROM_LIST.dl_next; \
+     to_list_last->dl_next = FROM_LIST.dl_next; \
      FROM_LIST.dl_prev->dl_next = &TO_LIST; \
-     FROM_LIST.dl_next->dl_prev = &TO_LIST; \
+     FROM_LIST.dl_next->dl_prev = to_list_last; \
      FROM_LIST.dl_prev = FROM_LIST.dl_next = &FROM_LIST; \
    } \
  } while (0)

  #define DL_MAKE_EMPTY(LIST) do { \
    LIST.dl_prev = LIST.dl_next = &LIST; \
  } while (0)

  static struct mc_marker *mc_ref_from = (void *) (ptrdiff_t) -1;

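The updated DL_MOVE above no longer requires the destination list to be empty; it splices the source list onto the tail of the destination. A minimal standalone sketch of that append operation on a simplified sentinel-based circular list (struct node and the function names here are hypothetical, not the real mc_marker lists):

  #include <assert.h>

  /* Hypothetical simplified node; the real lists link struct mc_marker
   * through dl_prev/dl_next with the list head acting as a sentinel. */
  struct node { struct node *prev, *next; };

  static void list_init (struct node *head)
  { head->prev = head->next = head; }

  static void list_add_last (struct node *head, struct node *n)
  {
    n->prev = head->prev; n->next = head;
    head->prev->next = n; head->prev = n;
  }

  /* Splice all elements of src onto the end of dst (the append semantics
   * the new DL_MOVE provides), leaving src empty. */
  static void list_move_append (struct node *src, struct node *dst)
  {
    if (src->next != src) {
      struct node *dst_last = dst->prev;
      dst->prev = src->prev;
      dst_last->next = src->next;
      src->prev->next = dst;
      src->next->prev = dst_last;
      src->prev = src->next = src;
    }
  }

  int main (void)
  {
    struct node a, b, n1, n2, n3;
    list_init (&a); list_init (&b);
    list_add_last (&a, &n1);
    list_add_last (&b, &n2); list_add_last (&b, &n3);
    list_move_append (&b, &a);               /* a now holds n1, n2, n3 */
    assert (a.next == &n1 && n1.next == &n2 && n2.next == &n3 && n3.next == &a);
    assert (b.next == &b && b.prev == &b);   /* b is empty again */
    return 0;
  }
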
pike.git/src/gc.c:4533:

  static void mc_wq_enqueue (struct mc_marker *m)
  /* m may already be in the queue, provided that m->la_count isn't
   * lower than its old value. */
  {
    struct mc_marker *n;
    unsigned pos;
    int m_la_count = m->la_count;

    assert (mc_work_queue);
+ #ifdef PIKE_DEBUG
+   assert (mc_lookahead < 0 || m_la_count != ((unsigned INT16) -1) >> 1);
+ #endif

    if (m->queuepos != MAX_UINT32) {
      assert (m->queuepos < mc_wq_used);
      assert (m->queuepos * 2 >= mc_wq_used ||
              m_la_count >= mc_work_queue[m->queuepos * 2]->la_count);
      assert (m->queuepos * 2 + 1 >= mc_wq_used ||
              m_la_count >= mc_work_queue[m->queuepos * 2 + 1]->la_count);
      pos = m->queuepos;
    }

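As the assertions above indicate, mc_work_queue is kept as a 1-based binary max-heap on la_count: the children of the entry at pos sit at pos * 2 and pos * 2 + 1, so the thing with the highest lookahead count is always dequeued first. A minimal standalone sketch of that heap discipline, assuming a plain array of counts rather than mc_marker pointers (wq, wq_enqueue and wq_dequeue are hypothetical names, and wq_dequeue assumes a non-empty queue):

  #include <assert.h>

  #define WQ_SIZE 64
  static int wq[WQ_SIZE + 1];      /* 1-based; slot 0 is unused */
  static unsigned wq_used = 1;

  static void wq_enqueue (int count)
  {
    unsigned pos = wq_used++;
    assert (wq_used <= WQ_SIZE + 1);
    /* Sift up: move smaller parents down until the heap property holds. */
    while (pos > 1 && wq[pos / 2] < count) {
      wq[pos] = wq[pos / 2];
      pos /= 2;
    }
    wq[pos] = count;
  }

  static int wq_dequeue (void)
  {
    int top = wq[1], last = wq[--wq_used];
    unsigned pos = 1, child;
    /* Sift the last element down from the root. */
    while ((child = pos * 2) < wq_used) {
      if (child + 1 < wq_used && wq[child + 1] > wq[child]) child++;
      if (wq[child] <= last) break;
      wq[pos] = wq[child];
      pos = child;
    }
    wq[pos] = last;
    return top;
  }

  int main (void)
  {
    wq_enqueue (2); wq_enqueue (5); wq_enqueue (3);
    assert (wq_dequeue() == 5);    /* highest lookahead count comes out first */
    assert (wq_dequeue() == 3);
    assert (wq_dequeue() == 2);
    return 0;
  }
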
pike.git/src/gc.c:4618:
      return -1;
    }

    return val.u.integer;
  }

  static void pass_lookahead_visit_ref (void *thing, int ref_type,
                                        visit_thing_fn *visit_fn, void *extra)
  {
    struct mc_marker *ref_to = find_mc_marker (thing);
-   int ref_from_flags;
-   int old_la_count; /* -1 flags new ref_to. */
-   int new_cand_ref, enqueue = 0;
-   TYPE_T type;
+   int ref_from_flags, ref_to_flags, old_la_count, ref_to_la_count;
+   int ref_added = 0, check_new_candidate = 0, la_count_handled = 0;

-
+   assert (mc_lookahead >= 0);
    assert (mc_pass == MC_PASS_LOOKAHEAD);
  #ifdef PIKE_DEBUG
    assert (mc_ref_from != (void *) (ptrdiff_t) -1);
-
+   assert (mc_ref_from->la_count != ((unsigned INT16) -1) >> 1);
  #endif

    ref_from_flags = mc_ref_from->flags;

    /* Create mc_marker if necessary. */

    if (!ref_to) {
      ref_to = my_make_mc_marker (thing, visit_fn, extra);
      MC_DEBUG_MSG (ref_to, "visiting new thing");
-     assert (!(ref_from_flags & MC_FLAG_REFCOUNTED));
-     ref_to->la_count = 0;
-     old_la_count = -1;
+     assert (!(ref_from_flags & (MC_FLAG_INT_VISITED | MC_FLAG_LA_VISITED)));
+     ref_to_la_count = old_la_count = 0;
    }
    else if (ref_to->flags & MC_FLAG_INTERNAL) {
      /* Ignore refs to internal things. Can't treat them like other
       * things anyway since the int_refs aren't valid for the starting
       * points. */
      MC_DEBUG_MSG (ref_to, "ignored internal");
-
+     assert (ref_to->la_count != ((unsigned INT16) -1) >> 1);
      return;
    }
    else {
      MC_DEBUG_MSG (ref_to, "visiting old thing");
-     old_la_count = ref_to->la_count;
+     ref_to_la_count = old_la_count = ref_to->la_count;
+     assert (ref_to->la_count != ((unsigned INT16) -1) >> 1);
    }

-   /* Update int_refs and la_refs. */
+   ref_to_flags = ref_to->flags;

-   if (ref_from_flags & MC_FLAG_INTERNAL) {
-     if (!(ref_from_flags & MC_FLAG_REFCOUNTED)) {
-       ref_to->int_refs++;
-       MC_DEBUG_MSG (ref_to, "added internal ref");
-     }
-     else {
+ #define SET_LA_COUNT_FOR_INT_OR_CF() do { \
+     if (!(ref_to_flags & MC_FLAG_LA_COUNT_FIXED)) { \
+       int cycle_depth; \
+       if (visit_fn == (visit_thing_fn *) &visit_object && \
+           !mc_block_pike_cycle_depth && \
+           (cycle_depth = \
+            mc_cycle_depth_from_obj ((struct object *) thing)) >= 0) { \
+         ref_to_la_count = cycle_depth; \
+         ref_to_flags |= MC_FLAG_LA_COUNT_FIXED; \
+         MC_DEBUG_MSG (ref_to, "la_count set to pike_cycle_depth"); \
+       } \
+       \
+       else { \
+         int count_from_source = ref_type & REF_TYPE_INTERNAL ? \
+           mc_ref_from->la_count : mc_ref_from->la_count - 1; \
+         \
+         if (ref_to_la_count < mc_lookahead) { \
+           ref_to_la_count = mc_lookahead; \
+           MC_DEBUG_MSG (ref_to, "la_count raised to mc_lookahead"); \
+         } \
+         \
+         if (ref_to_la_count < count_from_source) { \
+           ref_to_la_count = count_from_source; \
+           MC_DEBUG_MSG (ref_to, count_from_source == mc_ref_from->la_count ? \
+                         "la_count raised to source count" : \
+                         "la_count raised to source count - 1"); \
+         } \
+       } \
+     } \
+   } while (0)
+
+   if ((ref_from_flags & (MC_FLAG_INTERNAL | MC_FLAG_INT_VISITED)) ==
+       MC_FLAG_INTERNAL) {
+     if (ref_from_flags & MC_FLAG_LA_VISITED) {
        /* mc_ref_from is a former lookahead thing that has become internal. */
        assert (ref_to->la_refs > 0);
        ref_to->la_refs--;
        ref_to->int_refs++;
        MC_DEBUG_MSG (ref_to, "converted lookahead ref to internal");
      }
-
+     else {
+       ref_to->int_refs++;
+       MC_DEBUG_MSG (ref_to, "added internal ref");
+       ref_added = 1;
      }
-   else
-     if (!(ref_from_flags & MC_FLAG_REFCOUNTED)) {
-       ref_to->la_refs++;
-       MC_DEBUG_MSG (ref_to, "added lookahead ref");
-     }
      assert (ref_to->int_refs + ref_to->la_refs <= *(INT32 *) thing);

-     /* Update candidate ref. */
+     /* Handle the target becoming internal. */

-     if (ref_from_flags & (MC_FLAG_INTERNAL | MC_FLAG_CANDIDATE)) {
-       ref_to->flags |= MC_FLAG_CANDIDATE_REF;
-       new_cand_ref = 1;
-     }
-     else
-       new_cand_ref = 0;
+     if (ref_to->int_refs == *(INT32 *) thing) {
+       assert (!(ref_to_flags & MC_FLAG_INTERNAL));
+       assert (!(ref_to_flags & MC_FLAG_INT_VISITED));
+       ref_to_flags |= MC_FLAG_INTERNAL;

-     /* Calculate new lookahead count from source thing. */
+       SET_LA_COUNT_FOR_INT_OR_CF();

-     if (ref_type & REF_TYPE_INTERNAL) {
-       if (old_la_count < mc_ref_from->la_count) {
-         ref_to->la_count = mc_ref_from->la_count;
-         MC_DEBUG_MSG (ref_to, "lookahead raised to source count");
-         enqueue = 1;
+       ref_to->flags = ref_to_flags;
+       ref_to->la_count = ref_to_la_count;
+       if (ref_to->queuepos != MAX_UINT32 && old_la_count == ref_to_la_count)
+         MC_DEBUG_MSG (ref_to, "already in queue");
+       else {
+         assert (ref_to->la_count >= old_la_count);
+         mc_wq_enqueue (ref_to);
+         MC_DEBUG_MSG (ref_to, "enqueued internal");
        }
-
+       return;
      }
-   else
-     if (old_la_count < mc_ref_from->la_count - 1) {
-       ref_to->la_count = mc_ref_from->la_count - 1;
-       MC_DEBUG_MSG (ref_to, "lookahead raised to source count - 1");
-       if (ref_to->la_count > 0) enqueue = 1;
+
    }

-   /* Update internal/candidate/complete/incomplete status. */
+   if ((ref_from_flags & (MC_FLAG_INTERNAL | MC_FLAG_CANDIDATE)) &&
+       !(ref_to_flags & MC_FLAG_CANDIDATE_REF)) {
+     ref_to_flags |= MC_FLAG_CANDIDATE_REF;
+     MC_DEBUG_MSG (ref_to, "got candidate ref");

-   if (!(ref_from_flags & MC_FLAG_REFCOUNTED)) {
-     /* Only do this when traversing the link for the first time. */
+     SET_LA_COUNT_FOR_INT_OR_CF();

-     if (ref_to->int_refs == *(INT32 *) thing) {
-       ref_to->flags |= MC_FLAG_INTERNAL;
-       if (old_la_count >= 0) DL_REMOVE (ref_to);
-       if (ref_to->la_count < mc_lookahead) {
-         ref_to->la_count = mc_lookahead;
-         MC_DEBUG_MSG (ref_to, "made internal and raised count");
-         enqueue = 1;
+     check_new_candidate = la_count_handled = 1;
    }
-       else
-         MC_DEBUG_MSG (ref_to, "made internal");
+
+   if (!(ref_from_flags & (MC_FLAG_INTERNAL | MC_FLAG_LA_VISITED))) {
+     assert (ref_to->int_refs + ref_to->la_refs < *(INT32 *) thing);
+     ref_to->la_refs++;
+     MC_DEBUG_MSG (ref_to, "added lookahead ref");
+     ref_added = 1;
    }

-   else if (ref_to->int_refs + ref_to->la_refs == *(INT32 *) thing) {
-     if (old_la_count >= 0) DL_REMOVE (ref_to);
+   if (ref_added && (ref_to->int_refs + ref_to->la_refs == *(INT32 *) thing)) {
+     MC_DEBUG_MSG (ref_to, "refs got complete");
+     check_new_candidate = 1;
+
+     if (ref_to_flags & MC_FLAG_LA_VISITED) {
+       /* Move to mc_complete if it has been lookahead visited. In other
+        * cases this is handled after the lookahead visit is done. */
+       DL_REMOVE (ref_to);
        DL_ADD_LAST (mc_complete, ref_to);
-     if (ref_to->flags & MC_FLAG_CANDIDATE_REF) {
-       ref_to->flags |= MC_FLAG_CANDIDATE;
-       if (ref_to->la_count < mc_lookahead) {
-         ref_to->la_count = mc_lookahead;
-         MC_DEBUG_MSG (ref_to, "refs got complete for candidate ref'd, "
-                       "raised count");
+       MC_DEBUG_MSG (ref_to, "moved to complete list");
      }
-       else
-         MC_DEBUG_MSG (ref_to, "refs got complete for candidate ref'd");
-       enqueue = 1;
+
    }
-     else
-       MC_DEBUG_MSG (ref_to, "refs got complete");
+
+   /* Handle the target becoming a candidate. */
+
+   if (check_new_candidate &&
+       (ref_to_flags & MC_FLAG_CANDIDATE_REF) &&
+       ref_to->int_refs + ref_to->la_refs == *(INT32 *) thing) {
+     assert (!(ref_to_flags & MC_FLAG_CANDIDATE));
+     assert (ref_to->la_refs > 0);
+     ref_to_flags |= MC_FLAG_CANDIDATE;
+     MC_DEBUG_MSG (ref_to, "made candidate");
+
+     ref_to->flags = ref_to_flags;
+     ref_to->la_count = ref_to_la_count;
+
+     if (mc_block_lookahead & (1 << type_from_visit_fn (visit_fn))) {
+       MC_DEBUG_MSG (ref_to, "type is blocked - not enqueued");
+       return;
    }

-   else
-     if (old_la_count < 0) {
-       DL_ADD_LAST (mc_incomplete, ref_to);
-       MC_DEBUG_MSG (ref_to, "added to mc_incomplete");
+     if (ref_to_la_count > 0) {
+       /* Always enqueue if the count allows it, even if it hasn't
+        * increased. That since MC_FLAG_CANDIDATE_REF must be propagated. */
+       if (ref_to->queuepos != MAX_UINT32 && old_la_count == ref_to_la_count)
+         MC_DEBUG_MSG (ref_to, "already in queue");
+       else {
+         assert (ref_to->la_count >= old_la_count);
+         mc_wq_enqueue (ref_to);
+         MC_DEBUG_MSG (ref_to, "enqueued candidate");
+         mc_enqueued_noninternal = 1;
        }
      }
-
+
      else
-     /* Check ref_from_flags instead of (ref_to->flags & MC_FLAG_CANDIDATE_REF)
-      * since we're only interested if this link is the candidate ref. */
-     if (ref_from_flags & (MC_FLAG_INTERNAL | MC_FLAG_CANDIDATE) &&
-         !(ref_to->flags & MC_FLAG_CANDIDATE)) {
-       ref_to->flags |= MC_FLAG_CANDIDATE;
-       if (ref_to->la_count < mc_lookahead) {
-         ref_to->la_count = mc_lookahead;
-         MC_DEBUG_MSG (ref_to, "complete thing got candidate ref, "
-                       "raised count");
+       MC_DEBUG_MSG (ref_to, "candidate not enqueued due to zero count");
+     return;
      }
-       else
-         MC_DEBUG_MSG (ref_to, "complete thing got candidate ref");
-       enqueue = 1;
-     }
+

-   /* Check mc_block_lookahead. */
-   /* Note that we still need markers on these since they might
-    * eventually become internal in which case the type block should
-    * be bypassed. */
+   /* Normal handling. */

-   type = type_from_visit_fn (visit_fn);
-
-   if (mc_block_lookahead & (1 << type) && !(ref_to->flags & MC_FLAG_INTERNAL))
+   if (mc_block_lookahead & (1 << type_from_visit_fn (visit_fn))) {
+     ref_to->flags = ref_to_flags;
+     ref_to->la_count = ref_to_la_count;
      MC_DEBUG_MSG (ref_to, "type is blocked - not enqueued");
-
+     return;
+   }

-   else {
-     /* Check pike_cycle_depth. */
+   if (!la_count_handled && !(ref_to_flags & MC_FLAG_LA_COUNT_FIXED)) {
+     int cycle_depth;
+     int count_from_source = ref_type & REF_TYPE_INTERNAL ?
+       mc_ref_from->la_count : mc_ref_from->la_count - 1;

-     if (new_cand_ref) {
-       /* Got a new candidate reference. Check if we should set the
-        * count based on pike_cycle_depth. */
-       if (!mc_block_pike_cycle_depth && type == T_OBJECT) {
-         int cycle_depth = mc_cycle_depth_from_obj ((struct object *) thing);
-         if (cycle_depth >= 0) {
-           if (cycle_depth > ref_to->la_count) enqueue = 1;
-           ref_to->la_count = cycle_depth;
-           MC_DEBUG_MSG (ref_to, "set count to pike_cycle_depth");
+     if (ref_to_la_count < count_from_source) {
+       ref_to_la_count = count_from_source;
+       MC_DEBUG_MSG (ref_to, count_from_source == mc_ref_from->la_count ?
+                     "la_count raised to source count" :
+                     "la_count raised to source count - 1");
      }
-
+
+     if (visit_fn == (visit_thing_fn *) &visit_object &&
+         !mc_block_pike_cycle_depth &&
+         (cycle_depth =
+          mc_cycle_depth_from_obj ((struct object *) thing)) >= 0 &&
+         cycle_depth < ref_to_la_count) {
+       /* pike_cycle_depth is only allowed to lower the lookahead count
+        * for things that aren't internal, candidates, or candidate ref'd. */
+       ref_to_la_count = cycle_depth;
+       ref_to_flags |= MC_FLAG_LA_COUNT_FIXED;
+       MC_DEBUG_MSG (ref_to, "la_count lowered to pike_cycle_depth");
      }
    }

-   /* Enqueue if the lookahead count is raised. */
-
-   if (enqueue) {
+   ref_to->flags = ref_to_flags;
+   ref_to->la_count = ref_to_la_count;
+   assert (ref_to->la_count >= old_la_count);
+   if (ref_to->la_count > old_la_count) {
      mc_wq_enqueue (ref_to);
-     if (old_la_count >= 0) mc_count_revisits++;
+
      MC_DEBUG_MSG (ref_to, "enqueued");
-
+     mc_enqueued_noninternal = 1;
    }
    else
      MC_DEBUG_MSG (ref_to, "not enqueued");
  }
- }
+

  static void pass_mark_external_visit_ref (void *thing, int ref_type,
                                            visit_thing_fn *visit_fn, void *extra)
  {
    struct mc_marker *ref_to = find_mc_marker (thing);

    assert (mc_pass == MC_PASS_MARK_EXTERNAL);

    if (ref_to) {
-     if (!(ref_to->flags & MC_FLAG_INTERNAL)) {
-       /* Only interested in existing lookahead things. */
+     if ((ref_to->flags & (MC_FLAG_INT_VISITED | MC_FLAG_LA_VISITED)) ==
+         MC_FLAG_LA_VISITED) {
+       /* Only interested in existing lookahead things, except those on
+        * the "fringe" that haven't been visited. */

-       if (!IS_EXTERNAL (ref_to)) {
-         DL_REMOVE (ref_to);
+       if (IS_EXTERNAL (ref_to))
+         MC_DEBUG_MSG (ref_to, "already external");
+       else {
          FLAG_EXTERNAL (ref_to);
-
+         DL_REMOVE (ref_to);
          DL_ADD_LAST (mc_indirect, ref_to);
-         MC_DEBUG_MSG (ref_to, "marked external");
+         MC_DEBUG_MSG (ref_to, "marked external - moved to indirect list");
          assert (ref_to->int_refs + ref_to->la_refs == *(INT32 *) thing);
        }
-
+     }
      else
-       MC_DEBUG_MSG (ref_to, "already external");
+       MC_DEBUG_MSG (ref_to, ref_to->flags & MC_FLAG_INTERNAL ?
+                     "ignored internal" : "ignored fringe thing");
    }
-
+ }
+
+ static void current_only_visit_ref (void *thing, int ref_type,
+                                     visit_thing_fn *visit_fn, void *extra)
+ /* This is used when count_memory has a negative lookahead. It only
+  * recurses through REF_TYPE_INTERNAL references. Note that most
+  * fields in mc_marker aren't used. */
+ {
+   struct mc_marker *ref_to = find_mc_marker (thing);
+   int ref_from_flags;
+
+   assert (mc_lookahead < 0);
+ #ifdef PIKE_DEBUG
+   assert (mc_ref_from != (void *) (ptrdiff_t) -1);
+ #endif
+
+   ref_from_flags = mc_ref_from->flags;
+   assert (ref_from_flags & MC_FLAG_INTERNAL);
+   assert (!(ref_from_flags & MC_FLAG_INT_VISITED));
+
+   if (!ref_to) {
+     ref_to = my_make_mc_marker (thing, visit_fn, extra);
+     MC_DEBUG_MSG (ref_to, "got new thing");
+   }
+   else if (ref_to->flags & MC_FLAG_INTERNAL) {
+     /* Ignore refs to the starting points. Can't treat them like other
+      * things anyway since the int_refs aren't valid. */
+     MC_DEBUG_MSG (ref_to, "ignored starting point");
+     return;
+   }
    else
-     MC_DEBUG_MSG (ref_to, "ignored internal");
+     MC_DEBUG_MSG (ref_to, "got old thing");
+
+   if (!(ref_type & REF_TYPE_INTERNAL)) {
+     MC_DEBUG_MSG (ref_to, "ignored non-internal ref");
+     return;
    }
-
+
+   ref_to->int_refs++;
+   MC_DEBUG_MSG (ref_to, "added really internal ref");
+   assert (ref_to->int_refs <= *(INT32 *) thing);
+
+   if (ref_to->int_refs == *(INT32 *) thing) {
+     ref_to->flags |= MC_FLAG_INTERNAL;
+     mc_wq_enqueue (ref_to);
+     MC_DEBUG_MSG (ref_to, "enqueued internal");
    }
-
+ }

  PMOD_EXPORT int mc_count_bytes (void *thing)
  {
    if (mc_pass == MC_PASS_LOOKAHEAD) {
      struct mc_marker *m = find_mc_marker (thing);
  #ifdef PIKE_DEBUG
      if (!m) Pike_fatal ("mc_marker not found for %p.\n", thing);
  #endif
-     if ((m->flags & (MC_FLAG_INTERNAL|MC_FLAG_MEMCOUNTED)) == MC_FLAG_INTERNAL)
+     if ((m->flags & (MC_FLAG_INTERNAL|MC_FLAG_INT_VISITED)) == MC_FLAG_INTERNAL)
        return 1;
    }
    return 0;
  }

- /*! @decl int count_memory (int(0..)|mapping(string:int) options, @
+ /*! @decl int count_memory (int|mapping(string:int) options, @
   *!                         array|multiset|mapping|object|program|string|type|int... things)
   *! @appears Pike.count_memory
   *!
   *! This function calculates the number of bytes that all @[things]
   *! occupy. Or put another way, it calculates the number of bytes that
   *! would be freed if all those things would lose their references at
   *! the same time, i.e. not only the memory in the things themselves,
   *! but also in all the things that are directly and indirectly
   *! referenced from those things and not from anywhere else.
   *!

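Both pass_lookahead_visit_ref and current_only_visit_ref above decide that a thing has become internal once the internal references seen so far add up to its real refcount. A minimal standalone sketch of that bookkeeping, using a hypothetical reduced marker (toy_marker and count_internal_ref are illustrative names, not gc.c symbols):

  #include <assert.h>

  /* Hypothetical reduced marker: a thing becomes "internal" (and thus gets
   * memory counted) when every one of its real references has been seen
   * coming from another internal thing. In gc.c the real refcount is read
   * as *(INT32 *) thing. */
  struct toy_marker {
    int refcount;   /* the thing's actual refcount */
    int int_refs;   /* refs seen from internal things */
    int la_refs;    /* refs seen from lookahead (non-internal) things */
  };

  /* Count one internal ref; returns nonzero when the thing turns internal. */
  static int count_internal_ref (struct toy_marker *m)
  {
    m->int_refs++;
    assert (m->int_refs + m->la_refs <= m->refcount);
    return m->int_refs == m->refcount;
  }

  int main (void)
  {
    struct toy_marker m = { 2, 0, 0 };
    assert (!count_internal_ref (&m));   /* one of two refs accounted for */
    assert (count_internal_ref (&m));    /* second ref found: now internal */
    return 0;
  }
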
pike.git/src/gc.c:4881:
   *! anymore. If the lookahead is too long then unnecessary time might
   *! be spent searching through things that really have external
   *! references.
   *!
   *! Objects that are known to be part of cyclic structures are
   *! encouraged to have an integer constant or variable
   *! @expr{pike_cycle_depth@} that specifies the lookahead needed to
   *! discover those cycles. When @[Pike.count_memory] visits such
   *! objects, it uses that as the lookahead when going through the
   *! references emanating from them. Thus, assuming objects adhere to
-  *! this convention, you should rarely have to specify a higher
-  *! lookahead than 1 to this function.
+  *! this convention, you should rarely have to specify a lookahead
+  *! higher than zero to this function.
   *!
   *! Note that @expr{pike_cycle_depth@} can also be set to zero to
   *! effectively stop the lookahead from continuing through the object.
   *! That can be useful to put in objects you know have global
   *! references, to speed up the traversal.
   *!
   *! @param options
   *!   If this is an integer, it specifies the maximum lookahead
-  *!   distance. 0 counts only the memory of the given @[things],
-  *!   without following any references, 1 extends the count to all
+  *!   distance. -1 counts only the memory of the given @[things],
+  *!   without following any references, 0 extends the count to all
   *!   their referenced things as long as there are no cycles (except
-  *!   if @expr{pike_cycle_depth@} is found in objects - see above), 2
-  *!   makes it cover cycles of length 2 (i.e. where two things point
-  *!   at each other), and so on.
+  *!   if @expr{pike_cycle_depth@} is found in objects - see above), 1
+  *!   makes it cover cycles of length 1 (e.g. a thing points to
+  *!   itself), 2 handles cycles of length 2 (e.g. where two things
+  *!   point at each other), and so on.
   *!
   *!   However, the lookahead is by default blocked by programs, i.e.
   *!   it never follows references emanating from programs. That is
   *!   because programs are seldom part of dynamic data structures, and
   *!   they also typically contain numerous references to global data
   *!   which would add a lot of work to the lookahead search.
   *!
   *!   To control the search in more detail, @[options] can be a
   *!   mapping instead:
   *!
   *!   @mapping
-  *!     @member int(0..) lookahead
   *!       The maximum lookahead distance, as described above. Defaults
-  *!       to 1 if missing.
+  *!       to 0 if missing.
   *!     @member int block_arrays
   *!     @member int block_mappings
   *!     @member int block_multisets
   *!     @member int block_objects
   *!     @member int block_programs
   *!       When any of these are given with a nonzero value, the
   *!       corresponding type is blocked when lookahead references are
   *!       followed. They are unblocked if the flag is given with a
   *!       zero value. Only programs are blocked by default.
   *!     @member int block_pike_cycle_depth
   *!       Do not heed @expr{pike_cycle_depth@} values found in
-  *!       objects. This happens by default if the lookahead is 0.
+  *!       objects. This is implicit if the lookahead is negative.
   *!     @member int return_count
   *!       Return the number of things that memory was counted for,
   *!       instead of the byte count. (This is the same number
   *!       @expr{internal@} contains if @expr{collect_stats@} is set.)
   *!     @member int collect_internals
   *!       If this is nonzero then its value is replaced with an array
   *!       that contains the things that memory was counted for.
   *!     @member int collect_externals
   *!       If set then the value is replaced with an array containing
   *!       the things that were visited but turned out to have external
   *!       references (within the limited lookahead).
-
+  *!     @member int collect_direct_externals
+  *!       If set then the value is replaced with an array containing
+  *!       the things found during the lookahead that (appear to) have
+  *!       direct external references. This list is a subset of the
+  *!       @expr{collect_externals@} list. It is useful if you get
+  *!       unexpected global references to your data structure which
+  *!       you want to track down.
   *!     @member int collect_stats
   *!       If this is nonzero then the mapping is extended with more
   *!       elements containing statistics from the search; see below.
   *!   @endmapping
   *!
   *!   When the @expr{collect_stats@} flag is set, the mapping is
   *!   extended with these elements:
   *!
   *!   @mapping
   *!     @member int internal
   *!       Number of things that were marked internal and hence memory
   *!       counted. It includes the things given as arguments.
   *!     @member int cyclic
   *!       Number of things that were marked internal only after
   *!       resolving cycles.
   *!     @member int external
   *!       Number of things that were visited through the lookahead but
   *!       were found to be external.
   *!     @member int visits
-  *!       Number of times things were visited in total.
+  *!       Number of times things were visited in total. This figure
+  *!       includes visits to various internal things that aren't
+  *!       visible from the pike level, so it might be larger than the
+  *!       numbers above suggest.
   *!     @member int revisits
   *!       Number of times the same things were revisited. This can
   *!       occur in the lookahead when a thing is encountered through a
-  *!       shorter path than the one it first got visited through.
+  *!       shorter path than the one it first got visited through. It
+  *!       also occurs in resolved cycles. Like @expr{visits@}, this
+  *!       count can include things that aren't visible from pike.
   *!     @member int rounds
-  *!       Number of search rounds. Whenever the lookahead discovers a
-  *!       cycle, another round has to be made to search for more
-  *!       internal things referenced from the cycle.
+  *!       Number of search rounds. This is usually 1 or 2. More rounds
+  *!       are necessary only when blocked types turn out to be
+  *!       (acyclic) internal, so that they need to be counted and
+  *!       recursed anyway.
   *!     @member int work_queue_alloc
   *!       The number of elements that was allocated to store the work
   *!       queue which is used to keep track of the things to visit
   *!       during the lookahead. This is usually bigger than the
   *!       maximum number of things the queue actually held.
   *!     @member int size
   *!       The memory occupied by the internal things. This is the same
-  *!       as the return value, but it's put here too for convenience.
+  *!       as the normal return value, but it's put here too for
+  *!       convenience.
   *!   @endmapping
   *!
   *! @param things
   *!   One or more things to count memory size for. Only things passed
   *!   by reference are allowed, except for functions which are
   *!   forbidden because a meaningful size calculation can't be done
   *!   for them.
   *!
   *!   Integers are allowed because they are bignum objects when they
   *!   become sufficiently large. However, passing an integer that is

pike.git/src/gc.c:5009:
   *!
   *! @note
   *!   Things (normally programs) that are blocked in the lookahead
   *!   search are still recursed and memory counted properly if they are
   *!   given as arguments or only got internal references.
   */
  void f_count_memory (INT32 args)
  {
    struct svalue *collect_internal = NULL;
    unsigned count_internal, count_cyclic, count_visited;
-   unsigned count_visits, count_rounds;
+   unsigned count_visits, count_revisits, count_rounds;
    int collect_stats = 0, return_count = 0;

    if (args < 1)
      SIMPLE_TOO_FEW_ARGS_ERROR ("count_memory", 1);

    mc_block_lookahead = mc_block_lookahead_default;
    mc_block_pike_cycle_depth = 0;

    if (Pike_sp[-args].type == T_MAPPING) {
      struct mapping *opts = Pike_sp[-args].u.mapping;
      struct pike_string *ind;
      struct svalue *val;

      MAKE_CONST_STRING (ind, "lookahead");
      if ((val = low_mapping_string_lookup (opts, ind))) {
-       if (val->type != T_INT || val->u.integer < 0)
+       if (val->type != T_INT)
          SIMPLE_ARG_ERROR ("count_memory", 1,
-                           "\"lookahead\" must be a non-negative integer.");
-       mc_lookahead = val->u.integer > (unsigned INT16) -1 ?
-         (unsigned INT16) -1 : val->u.integer;
+                           "\"lookahead\" must be an integer.");
+       mc_lookahead =
+         val->u.integer > (unsigned INT16) -1 ? (unsigned INT16) -1 :
+         val->u.integer < 0 ? -1 :
+         val->u.integer;
      }
      else
-       mc_lookahead = 1;
+       mc_lookahead = 0;

  #define CHECK_BLOCK_FLAG(NAME, TYPE_BIT) do { \
      MAKE_CONST_STRING (ind, NAME); \
      if ((val = low_mapping_string_lookup (opts, ind))) { \
        if (UNSAFE_IS_ZERO (val)) \
          mc_block_lookahead &= ~TYPE_BIT; \
        else \
          mc_block_lookahead |= TYPE_BIT; \
      } \
    } while (0)

pike.git/src/gc.c:5067:
      MAKE_CONST_STRING (ind, "collect_internals");
      if ((val = low_mapping_string_lookup (opts, ind)) && !UNSAFE_IS_ZERO (val))
        collect_internal = Pike_sp; /* Value doesn't matter. */

      MAKE_CONST_STRING (ind, "collect_stats");
      if ((val = low_mapping_string_lookup (opts, ind)) && !UNSAFE_IS_ZERO (val))
        collect_stats = 1;
    }

    else {
-     if (Pike_sp[-args].type != T_INT || Pike_sp[-args].u.integer < 0)
-       SIMPLE_ARG_TYPE_ERROR ("count_memory", 1, "int(0..)|mapping(string:int)");
-     mc_lookahead = Pike_sp[-args].u.integer > (unsigned INT16) -1 ?
-       (unsigned INT16) -1 : Pike_sp[-args].u.integer;
+     if (Pike_sp[-args].type != T_INT)
+       SIMPLE_ARG_TYPE_ERROR ("count_memory", 1, "int|mapping(string:int)");
+     mc_lookahead =
+       Pike_sp[-args].u.integer > (unsigned INT16) -1 ? (unsigned INT16) -1 :
+       Pike_sp[-args].u.integer < 0 ? -1 :
+       Pike_sp[-args].u.integer;
    }

    init_mc_marker_hash();

-   if (!mc_lookahead) mc_block_pike_cycle_depth = 1;
+
    if (pike_cycle_depth_str.type == PIKE_T_FREE) {
      pike_cycle_depth_str.type = T_STRING;
      MAKE_CONST_STRING (pike_cycle_depth_str.u.string, "pike_cycle_depth");
    }

    assert (mc_work_queue == NULL);
    mc_work_queue = malloc (MC_WQ_START_SIZE * sizeof (mc_work_queue[0]));
    if (!mc_work_queue) {
      exit_mc_marker_hash();
      SIMPLE_OUT_OF_MEMORY_ERROR ("Pike.count_memory",

pike.git/src/gc.c:5139:
          move_svalue (s, &s2);
        }

        if (find_mc_marker (s->u.ptr)) {
          /* The user passed the same thing several times. Ignore it. */
        }

        else {
          struct mc_marker *m =
            my_make_mc_marker (s->u.ptr, visit_fn_from_type[s->type], NULL);
-         m->flags = MC_FLAG_INTERNAL;
+         m->flags |= MC_FLAG_INTERNAL;
          if (!mc_block_pike_cycle_depth && s->type == T_OBJECT) {
            int cycle_depth = mc_cycle_depth_from_obj (s->u.object);
            if (throw_value.type != PIKE_T_FREE) {
              exit_mc_marker_hash();
              free (mc_work_queue + 1);
              mc_work_queue = NULL;
              throw_severity = THROW_ERROR;
              pike_throw();
            }
-           m->la_count = cycle_depth == -1 ? mc_lookahead : cycle_depth;
+           m->la_count = cycle_depth >= 0 ? cycle_depth : mc_lookahead;
          }
          else
            m->la_count = mc_lookahead;
          mc_wq_enqueue (m);
          MC_DEBUG_MSG (m, "enqueued starting point");
        }
      }
    }
  }

pike.git/src/gc.c:5176:
    assert (mc_incomplete.dl_prev == &mc_incomplete);
    assert (mc_incomplete.dl_next == &mc_incomplete);
    assert (mc_complete.dl_prev == &mc_complete);
    assert (mc_complete.dl_next == &mc_complete);
  #ifdef PIKE_DEBUG
    assert (mc_ref_from == (void *) (ptrdiff_t) -1);
  #endif

    mc_counted_bytes = 0;
    count_internal = count_cyclic = count_visited = 0;
-   count_visits = mc_count_revisits = count_rounds = 0;
+   count_visits = count_revisits = count_rounds = 0;

-
+   visit_ref = mc_lookahead < 0 ?
+     current_only_visit_ref : pass_lookahead_visit_ref;
+
    do {
      count_rounds++;
-
+     mc_enqueued_noninternal = 0;

  #ifdef MEMORY_COUNT_DEBUG
-     fputs ("MC_PASS_LOOKAHEAD\n", stderr);
+     fprintf (stderr, "[%d] MC_PASS_LOOKAHEAD\n", count_rounds);
  #endif
      mc_pass = MC_PASS_LOOKAHEAD;
-     visit_ref = pass_lookahead_visit_ref;
+

      while ((mc_ref_from = mc_wq_dequeue())) {
        int action;

-       if ((mc_ref_from->flags & (MC_FLAG_INTERNAL|MC_FLAG_MEMCOUNTED)) ==
-           MC_FLAG_INTERNAL) {
+       assert (!(mc_ref_from->flags & MC_FLAG_INT_VISITED));
+
+       if (mc_ref_from->flags & MC_FLAG_INTERNAL) {
          action = VISIT_COUNT_BYTES; /* Memory count this. */
          MC_DEBUG_MSG (NULL, "enter with byte counting");

-
+         mc_ref_from->visit_fn (mc_ref_from->thing, action, mc_ref_from->extra);
+         count_visits++;
+
+         if (mc_ref_from->flags & MC_FLAG_LA_VISITED) {
+           count_revisits++;
+           DL_REMOVE (mc_ref_from);
+           MC_DEBUG_MSG (NULL, "leave - removed from list");
+         }
+         else {
+           if (collect_stats &&
+               type_from_visit_fn (mc_ref_from->visit_fn) <= MAX_TYPE)
+             count_visited++;
+           MC_DEBUG_MSG (NULL, "leave");
+         }
+
+         mc_ref_from->flags |= MC_FLAG_INT_VISITED;
+
          if (return_count || collect_stats || collect_internal) {
            TYPE_T type = type_from_visit_fn (mc_ref_from->visit_fn);
            if (type <= MAX_TYPE) {
              count_internal++;
              if (collect_internal) {
                Pike_sp->type = type;
                Pike_sp->subtype = 0;
                Pike_sp->u.ptr = mc_ref_from->thing;
                add_ref ((struct ref_dummy *) mc_ref_from->thing);
                dmalloc_touch_svalue (Pike_sp);
                Pike_sp++;
                AGGR_ARR_CHECK (collect_internal, 120);
              }
            }
          }
        }

        else {
-
+         assert (mc_lookahead >= 0);
          action = VISIT_NORMAL;
          MC_DEBUG_MSG (NULL, "enter");
-       }
+

-       count_visits++;
+
          mc_ref_from->visit_fn (mc_ref_from->thing, action, mc_ref_from->extra);
-
+         count_visits++;

-       if (throw_value.type != PIKE_T_FREE) {
-         exit_mc_marker_hash();
-         free (mc_work_queue + 1);
-         mc_work_queue = NULL;
-         throw_severity = THROW_ERROR;
-         pike_throw();
+         if (mc_ref_from->flags & MC_FLAG_LA_VISITED) {
+           count_revisits++;
+           MC_DEBUG_MSG (NULL, "leave (revisit)");
          }

-       if (!(mc_ref_from->flags & MC_FLAG_REFCOUNTED)) {
-         mc_ref_from->flags |= MC_FLAG_REFCOUNTED;
+         else {
            if (collect_stats &&
                type_from_visit_fn (mc_ref_from->visit_fn) <= MAX_TYPE)
              count_visited++;
-
+
+           mc_ref_from->flags |= MC_FLAG_LA_VISITED;
+
+           /* The reason for fixing the lists here is to avoid putting
+            * the "fringe" things that we never visit onto them. */
+           if (mc_ref_from->int_refs + mc_ref_from->la_refs <
+               *(INT32 *) mc_ref_from->thing) {
+             DL_ADD_LAST (mc_incomplete, mc_ref_from);
+             MC_DEBUG_MSG (NULL, "leave - added to incomplete list");
            }
-
+           else {
+             DL_ADD_LAST (mc_complete, mc_ref_from);
+             MC_DEBUG_MSG (NULL, "leave - added to complete list");
+           }
+         }
+       }

-       if (mc_ref_from->flags & MC_FLAG_INTERNAL)
-         mc_ref_from->flags |= MC_FLAG_MEMCOUNTED;
-       MC_DEBUG_MSG (NULL, "leave");
+       if (throw_value.type != PIKE_T_FREE) {
+         exit_mc_marker_hash();
+         free (mc_work_queue + 1);
+         mc_work_queue = NULL;
+         throw_severity = THROW_ERROR;
+         pike_throw();
        }
-
+     }
  #if defined (PIKE_DEBUG) || defined (MEMORY_COUNT_DEBUG)
      mc_ref_from = (void *) (ptrdiff_t) -1;
  #endif

-
+     /* If no things that might be indirectly incomplete have been
+      * enqueued then there's no need to do another mark external pass. */
+     if (!mc_enqueued_noninternal) {
+       DL_MAKE_EMPTY (mc_complete);
+       break;
+     }
+
+     if (mc_lookahead < 0) {
+       assert (mc_incomplete.dl_prev == &mc_incomplete);
+       assert (mc_incomplete.dl_next == &mc_incomplete);
+       assert (mc_complete.dl_prev == &mc_complete);
+       assert (mc_complete.dl_next == &mc_complete);
+       break;
+     }
+
  #ifdef MEMORY_COUNT_DEBUG
-     fputs ("MC_PASS_MARK_EXTERNAL\n", stderr);
+     fprintf (stderr, "[%d] MC_PASS_MARK_EXTERNAL, "
+              "traversing the incomplete list\n", count_rounds);
  #endif
      mc_pass = MC_PASS_MARK_EXTERNAL;
      visit_ref = pass_mark_external_visit_ref;

      assert (mc_indirect.dl_next == &mc_indirect);
      assert (mc_indirect.dl_prev == &mc_indirect);

      {
        struct mc_marker *m, *list;

        for (m = mc_incomplete.dl_next; m != &mc_incomplete; m = m->dl_next)
          FLAG_EXTERNAL (m);

        list = &mc_incomplete;
        while (1) {
          /* First go through the incomplete list to visit externals,
           * then the indirectly incomplete list where all the new
           * indirect externals appear. */
          for (m = list->dl_next; m != list; m = m->dl_next) {
            TYPE_T type = type_from_visit_fn (m->visit_fn);
-
+           assert (!(m->flags & MC_FLAG_INTERNAL));
+           assert (m->flags & MC_FLAG_LA_VISITED);
+           assert (list != &mc_incomplete || !(m->flags & MC_FLAG_CANDIDATE));
            if (mc_block_lookahead & (1 << type))
-             MC_DEBUG_MSG (m, "type blocked - not visiting");
+             MC_DEBUG_MSG (m, "type is blocked - not visiting");
            else {
-             MC_DEBUG_MSG (m, "visiting external");
+ #ifdef MEMORY_COUNT_DEBUG
+             mc_ref_from = m;
+             MC_DEBUG_MSG (NULL, "visiting external");
+ #endif
              count_visits++;
-
+             count_revisits++;
              m->visit_fn (m->thing, VISIT_NORMAL, m->extra);
            }
          }
-         if (list == &mc_incomplete) list = &mc_indirect;
+         if (list == &mc_incomplete) {
+           list = &mc_indirect;
+ #ifdef MEMORY_COUNT_DEBUG
+           fprintf (stderr, "[%d] MC_PASS_MARK_EXTERNAL, "
+                    "traversing the indirect list\n", count_rounds);
+ #endif
+         }
          else break;
        }
-
+
+ #if defined (PIKE_DEBUG) || defined (MEMORY_COUNT_DEBUG)
+       mc_ref_from = (void *) (ptrdiff_t) -1;
+ #endif
      }

      if (DL_IS_EMPTY (mc_complete)) break;

-
+ #ifdef MEMORY_COUNT_DEBUG
+     fprintf (stderr, "[%d] MC_PASS_MARK_EXTERNAL, "
+              "enqueuing cyclic internals\n", count_rounds);
+ #endif
+
      {
        /* We've found some internal cyclic stuff. Put it in the work
         * list for the next round. */
        struct mc_marker *m = mc_complete.dl_next;
        assert (m != &mc_complete);
        do {
-         DL_REMOVE (m);
+         assert (!(m->flags & (MC_FLAG_INTERNAL | MC_FLAG_INT_VISITED)));
          m->flags |= MC_FLAG_INTERNAL;
-
+         assert (m->flags & (MC_FLAG_CANDIDATE | MC_FLAG_LA_VISITED));
+         assert (!(mc_block_lookahead &
+                   (1 << type_from_visit_fn (m->visit_fn))));
          /* The following assertion implies that the lookahead count
           * already has been raised as it should. */
          assert (m->flags & MC_FLAG_CANDIDATE_REF);
          mc_wq_enqueue (m);
          if (collect_stats && type_from_visit_fn (m->visit_fn) <= MAX_TYPE)
            count_cyclic++;
          MC_DEBUG_MSG (m, "enqueued cyclic internal");
-         m = mc_complete.dl_next;
+         m = m->dl_next;
        } while (m != &mc_complete);
      }

      DL_MOVE (mc_indirect, mc_complete);

      TOGGLE_EXT_FLAGS();

  #ifdef PIKE_DEBUG
      if (d_flag) {
        struct mc_marker *m;
        for (m = mc_incomplete.dl_next; m != &mc_incomplete; m = m->dl_next) {
          assert (!(m->flags & MC_FLAG_INTERNAL));
          assert (!IS_EXTERNAL (m));
        }
        for (m = mc_complete.dl_next; m != &mc_complete; m = m->dl_next) {
          assert (!(m->flags & MC_FLAG_INTERNAL));
          assert (!IS_EXTERNAL (m));
        }
      }
  #endif
-
+
+     /* Prepare for next MC_PASS_LOOKAHEAD round. */
+     visit_ref = pass_lookahead_visit_ref;
    } while (1);

  #ifdef MEMORY_COUNT_DEBUG
    fputs ("memory counting done\n", stderr);
  #endif

  #if 0
    fprintf (stderr, "count_memory stats: %u internal, %u cyclic, %u external\n"
             "count_memory stats: %u visits, %u revisits, %u rounds\n",
             count_internal, count_cyclic,
             count_visited - count_internal,
-            count_visits, mc_count_revisits, count_rounds);
+            count_visits, count_revisits, count_rounds);
  #ifdef PIKE_DEBUG
    {
      size_t num, size;
      count_memory_in_mc_markers (&num, &size);
      fprintf (stderr, "count_memory used %"PRINTSIZET"u bytes "
               "for %"PRINTSIZET"u markers.\n", size, num);
    }
  #endif
  #endif

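TOGGLE_EXT_FLAGS above relies on the MC_FLAG_EXT_TOGGLE scheme described near the top of this file: instead of clearing the "external" flag on every marker between rounds, the interpretation of the stored bit is flipped by toggling a global bias. A minimal standalone sketch of the idea (the names EXT_TOGGLE, IS_EXT, FLAG_EXT and TOGGLE_ROUND are hypothetical simplifications of IS_EXTERNAL/FLAG_EXTERNAL/TOGGLE_EXT_FLAGS; FLAG_EXT is assumed to be applied only to markers that currently read as not external):

  #include <assert.h>

  #define EXT_TOGGLE 0x40
  static unsigned ext_toggle_bias = 0;

  #define IS_EXT(flags)   (((flags) ^ ext_toggle_bias) & EXT_TOGGLE)
  #define FLAG_EXT(flags) ((flags) ^= EXT_TOGGLE)   /* mark external this round */
  #define TOGGLE_ROUND()  (ext_toggle_bias ^= EXT_TOGGLE)

  int main (void)
  {
    unsigned flags = 0;            /* freshly initialized marker, bias 0 */
    assert (!IS_EXT (flags));
    FLAG_EXT (flags);              /* marked external during mark-external pass */
    assert (IS_EXT (flags));
    TOGGLE_ROUND();                /* leaving MC_PASS_MARK_EXTERNAL */
    assert (!IS_EXT (flags));      /* same stored bit now reads as not external */
    return 0;
  }
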
pike.git/src/gc.c:5364:
        struct pike_string *ind; \
        push_ulongest (VALUE); \
        MAKE_CONST_STRING (ind, NAME); \
        mapping_string_insert (opts, ind, Pike_sp - 1); \
        pop_stack(); \
      } while (0)
      INSERT_STAT ("internal", count_internal);
      INSERT_STAT ("cyclic", count_cyclic);
      INSERT_STAT ("external", count_visited - count_internal);
      INSERT_STAT ("visits", count_visits);
-     INSERT_STAT ("revisits", mc_count_revisits);
+     INSERT_STAT ("revisits", count_revisits);
      INSERT_STAT ("rounds", count_rounds);
      INSERT_STAT ("work_queue_alloc", mc_wq_size);
      INSERT_STAT ("size", mc_counted_bytes);
    }

    MAKE_CONST_STRING (ind, "collect_externals");
    if ((val = low_mapping_string_lookup (opts, ind)) &&
        !UNSAFE_IS_ZERO (val)) {
      BEGIN_AGGREGATE_ARRAY (count_visited - count_internal) {
        struct mc_marker *m, *list = &mc_incomplete;
        while (1) {
          /* Collect things from the mc_incomplete and mc_indirect lists. */
-         for (m = list->dl_next; m != list; m = m->dl_next)
-           if (m->flags & MC_FLAG_REFCOUNTED) {
            TYPE_T type = type_from_visit_fn (m->visit_fn);
-
+           assert (!(m->flags & MC_FLAG_INTERNAL));
+           assert (m->flags & MC_FLAG_LA_VISITED);
            if (type <= MAX_TYPE) {
              Pike_sp->type = type;
              Pike_sp->subtype = 0;
              Pike_sp->u.ptr = m->thing;
              add_ref ((struct ref_dummy *) m->thing);
              dmalloc_touch_svalue (Pike_sp);
              Pike_sp++;
              DO_AGGREGATE_ARRAY (120);
            }
          }
          if (list == &mc_incomplete) list = &mc_indirect;
          else break;
        }
      } END_AGGREGATE_ARRAY;
      args++;
      mapping_string_insert (opts, ind, Pike_sp - 1);
    }
-
+
+   MAKE_CONST_STRING (ind, "collect_direct_externals");
+   if ((val = low_mapping_string_lookup (opts, ind)) &&
+       !UNSAFE_IS_ZERO (val)) {
+     BEGIN_AGGREGATE_ARRAY (count_visited - count_internal) {
+       /* Collect things from the mc_incomplete list. */
+       struct mc_marker *m;
+       for (m = mc_incomplete.dl_next; m != &mc_incomplete; m = m->dl_next) {
+         TYPE_T type = type_from_visit_fn (m->visit_fn);
+         assert (!(m->flags & MC_FLAG_INTERNAL));
+         assert (m->flags & MC_FLAG_LA_VISITED);
+         if (type <= MAX_TYPE) {
+           Pike_sp->type = type;
+           Pike_sp->subtype = 0;
+           Pike_sp->u.ptr = m->thing;
+           add_ref ((struct ref_dummy *) m->thing);
+           dmalloc_touch_svalue (Pike_sp);
+           Pike_sp++;
+           DO_AGGREGATE_ARRAY (120);
          }
+       }
+     } END_AGGREGATE_ARRAY;
+     args++;
+     mapping_string_insert (opts, ind, Pike_sp - 1);
+   }
+ }

    mc_pass = 0;
    visit_ref = NULL;

    DL_MAKE_EMPTY (mc_incomplete);
    DL_MAKE_EMPTY (mc_indirect);
  #ifdef DO_PIKE_CLEANUP
    {
      size_t e;
      for (e = 0; e < mc_marker_hash_table_size; e++)