pike.git / src / mapping.c

pike.git/src/mapping.c:1:
 /*
 || This file is part of Pike. For copyright information see COPYRIGHT.
 || Pike is distributed under GPL, LGPL and MPL. See the file COPYING
 || for more information.
-|| $Id$
+
 */

 #include "global.h"
 #include "main.h"
 #include "object.h"
 #include "mapping.h"
 #include "svalue.h"
 #include "array.h"
 #include "pike_macros.h"
 #include "pike_error.h"
 #include "pike_memory.h"
 #include "pike_types.h"
 #include "dynamic_buffer.h"
 #include "interpret.h"
 #include "las.h"
 #include "gc.h"
 #include "stralloc.h"
-#include "pike_security.h"
-#include "block_alloc.h"
+#include "block_allocator.h"
 #include "opcodes.h"
 #include "stuff.h"

 #define AVG_LINK_LENGTH 4
 #define MIN_LINK_LENGTH 1
 #define MAP_SLOTS(X) ((X)?((X)+((X)>>4)+8):0)

 struct mapping *first_mapping;

 struct mapping *gc_internal_mapping = 0;
pike.git/src/mapping.c:36:

 #define unlink_mapping_data(M) do{ \
   struct mapping_data *md_=(M); \
   if(md_->hardlinks) { md_->hardlinks--; md_->valrefs--; } \
   free_mapping_data(M); \
 }while(0)

 #define MAPPING_DATA_SIZE(HSIZE, KEYPAIRS) \
   PTR_TO_INT(MD_KEYPAIRS(0, HSIZE) + KEYPAIRS)

-#undef EXIT_BLOCK
-#define EXIT_BLOCK(m) do{ \
-DO_IF_DEBUG( \
-  if(m->refs) { \
-    DO_IF_DMALLOC(describe_something(m, T_MAPPING, 0,2,0, NULL)); \
-    Pike_fatal("really free mapping on mapping with %d refs.\n", m->refs); \
-  } \
-) \
- \
-  FREE_PROT(m); \
- \
-  unlink_mapping_data(m->data); \
- \
-  DOUBLEUNLINK(first_mapping, m); \
- \
-  GC_FREE(m); \
-}while(0)
+static struct block_allocator mapping_allocator = BA_INIT_PAGES(sizeof(struct mapping), 2);
+void count_memory_in_mappings(size_t * num, size_t * size) {
+  struct mapping *m;
+  double datasize = 0.0;
+  ba_count_all(&mapping_allocator, num, size);
+  for(m=first_mapping;m;m=m->next) {
+    datasize+=MAPPING_DATA_SIZE(m->data->hashsize, m->data->num_keypairs) / (double) m->data->refs;
+  }
+  *size += (size_t) datasize;
+}

+void really_free_mapping(struct mapping * m) {
+#ifdef PIKE_DEBUG
+  if (m->refs) {
+# ifdef DEBUG_MALLOC
+    describe_something(m, T_MAPPING, 0,2,0, NULL);
+# endif
+    Pike_fatal("really free mapping on mapping with %d refs.\n", m->refs);
+  }
+#endif
+  unlink_mapping_data(m->data);
+  DOUBLEUNLINK(first_mapping, m);
+  GC_FREE(m);
+  ba_free(&mapping_allocator, m);
+}

-#undef COUNT_OTHER
+ATTRIBUTE((malloc))
+static struct mapping * alloc_mapping(void) {
+  return ba_alloc(&mapping_allocator);
+}

-#define COUNT_OTHER() do{ \
-  struct mapping *m; \
-  double datasize = 0.0; \
-  for(m=first_mapping;m;m=m->next) \
-  { \
-    datasize+=MAPPING_DATA_SIZE(m->data->hashsize, m->data->num_keypairs) / \
-      (double) m->data->refs; \
-  } \
-  size += (size_t) datasize; \
-}while(0)
+void free_all_mapping_blocks(void) {
+  ba_destroy(&mapping_allocator);
+}

-BLOCK_ALLOC_FILL_PAGES(mapping, 2)
-
+
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
 #define IF_ELSE_KEYPAIR_LOOP(X, Y) Y
 #define FREE_KEYPAIR(md, k) do { \
     k->next = md->free_list; \
     md->free_list = k; \
   } while(0)
 #else /* PIKE_MAPPING_KEYPAIR_LOOP */
 #define IF_ELSE_KEYPAIR_LOOP(X, Y) X
 #define FREE_KEYPAIR(md, k) do { \
     md->free_list--; \
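The hunk above replaces the old BLOCK_ALLOC/EXIT_BLOCK/COUNT_OTHER macro machinery with an explicit per-type block allocator. As a point of reference, the same pattern applied to some other block type looks roughly like the sketch below; only BA_INIT_PAGES and the ba_* calls are taken from the hunk above, while struct thing and the function names are made up for illustration.

    /* Illustrative sketch only -- not part of mapping.c. */
    #include "block_allocator.h"

    struct thing { int payload; };

    /* One static allocator per block type; blocks are sized to hold two
     * pages worth of objects, as with the mapping allocator above. */
    static struct block_allocator thing_allocator =
      BA_INIT_PAGES(sizeof(struct thing), 2);

    static struct thing *alloc_thing(void) {
      return ba_alloc(&thing_allocator);          /* take one object from a block */
    }

    static void really_free_thing(struct thing *t) {
      ba_free(&thing_allocator, t);               /* hand it back to its block */
    }

    static void count_memory_in_things(size_t *num, size_t *size) {
      ba_count_all(&thing_allocator, num, size);  /* live objects + block bytes */
    }

    static void free_all_thing_blocks(void) {
      ba_destroy(&thing_allocator);               /* release all blocks at exit */
    }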
pike.git/src/mapping.c:106:
       if (!*prev_) { \
         Pike_fatal("Node to move not found!\n"); \
       } \
     ); \
     } \
     *prev_ = k; \
   } \
   } while(0)
 #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */

+void mapping_free_keypair(struct mapping_data *md, struct keypair *k)
+{
+  FREE_KEYPAIR(md, k);
+}
+
+static INLINE int check_type_contains(TYPE_FIELD types, const struct svalue * s) {
+  return (TYPEOF(*s) == PIKE_T_OBJECT || types & (BIT_OBJECT|(1 << TYPEOF(*s))));
+}
+
+static INLINE int check_type_overlaps(TYPE_FIELD t1, TYPE_FIELD t2) {
+  return (!t1 && !t2) || t1 & t2 || (t1|t2) & BIT_OBJECT;
+}
+
 #ifdef PIKE_DEBUG

 /** This function checks that the type field isn't lacking any bits.
  * It is used for debugging purposes only.
  */
 static void check_mapping_type_fields(const struct mapping *m)
 {
   INT32 e;
   const struct keypair *k=0;
   const struct mapping_data *md;
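check_type_contains() and check_type_overlaps() factor out a type-bitmask test that the lookup and comparison code further down (see the LOW_FIND and mapping_equal_p hunks) previously spelled out inline. A rough illustration of the intent, with a hypothetical md pointer:

    /* Illustrative sketch only -- not part of mapping.c.
     * md->ind_types has one bit per svalue type that occurs as an index.
     * If the key's type bit is missing, the key cannot be present -- except
     * that objects may define their own equality, so BIT_OBJECT on either
     * side disables the shortcut. */
    if (!check_type_contains(md->ind_types, key))
      return 0;    /* key cannot be in this mapping; skip the hash walk */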
pike.git/src/mapping.c:176: Inside #if defined(PIKE_DEBUG)
   debug_malloc_touch(m);
 #ifdef PIKE_DEBUG
   if (Pike_in_gc > GC_PASS_PREPARE && Pike_in_gc < GC_PASS_ZAP_WEAK)
     Pike_fatal("Can't allocate a new mapping_data inside gc.\n");
   if(size < 0) Pike_fatal("init_mapping with negative value.\n");
 #endif
   if(size)
   {
     hashsize=find_next_power(size / AVG_LINK_LENGTH + 1);

+    if (size < hashsize) size = hashsize;
+
     e=MAPPING_DATA_SIZE(hashsize, size);

-    md=(struct mapping_data *)xalloc(e);
+    md=xcalloc(1,e);

     m->data=md;
     md->hashsize=hashsize;

-    MEMSET((char *)md->hash, 0, sizeof(struct keypair *) * md->hashsize);
-
+
     md->free_list=MD_KEYPAIRS(md, hashsize);
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
     for(e=1;e<size;e++)
     {
       md->free_list[e-1].next = md->free_list + e;
       mark_free_svalue (&md->free_list[e-1].ind);
       mark_free_svalue (&md->free_list[e-1].val);
     }
-    md->free_list[e-1].next=0;
+    /* md->free_list[e-1].next=0; */
     mark_free_svalue (&md->free_list[e-1].ind);
     mark_free_svalue (&md->free_list[e-1].val);
 #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
-    md->ind_types = 0;
-    md->val_types = 0;
+    /* md->ind_types = 0; */
+    /* md->val_types = 0; */
     md->flags = flags;
-    md->size = 0;
-    md->refs=0;
-#ifdef ATOMIC_SVALUE
-    md->ref_type = T_MAPPING_DATA;
-#endif
-    md->valrefs=0;
-    md->hardlinks=0;
+    /* md->size = 0; */
+    /* md->refs=0; */
+    /* md->valrefs=0; */
+    /* md->hardlinks=0; */
     md->num_keypairs=size;
   }else{
     switch (flags & MAPPING_WEAK) {
     case 0: md = &empty_data; break;
     case MAPPING_WEAK_INDICES: md = &weak_ind_empty_data; break;
     case MAPPING_WEAK_VALUES: md = &weak_val_empty_data; break;
     default: md = &weak_both_empty_data; break;
     }
   }
   add_ref(md);
pike.git/src/mapping.c:239:
  * @see free_mapping
  */
 PMOD_EXPORT struct mapping *debug_allocate_mapping(int size)
 {
   struct mapping *m;

   m=alloc_mapping();

   GC_ALLOC(m);

-  INITIALIZE_PROT(m);
+
   init_mapping(m,size,0);

-#ifdef ATOMIC_SVALUE
-  m->ref_type = T_MAPPING;
-#endif
+
   m->refs = 0;
   add_ref(m); /* For DMALLOC... */

   DOUBLELINK(first_mapping, m);

   return m;
 }


 PMOD_EXPORT void really_free_mapping_data(struct mapping_data *md)
 {
   INT32 e;
   struct keypair *k;
   debug_malloc_touch(md);

 #ifdef PIKE_DEBUG
   if (md->refs) {
     Pike_fatal("really_free_mapping_data(): md has non-zero refs: %d\n",
                md->refs);
   }
+
+  if (!md->size) {
+    /* Paranoia and keep gcc happy. */
+    if (md == &empty_data) {
+      Pike_fatal("really_free_mapping_data(): md is empty_data!\n");
+    }
+    if (md == &weak_ind_empty_data) {
+      Pike_fatal("really_free_mapping_data(): md is weak_ind_empty_data!\n");
+    }
+    if (md == &weak_val_empty_data) {
+      Pike_fatal("really_free_mapping_data(): md is weak_val_empty_data!\n");
+    }
+    if (md == &weak_both_empty_data) {
+      Pike_fatal("really_free_mapping_data(): md is weak_both_empty_data!\n");
+    }
+  }
 #endif /* PIKE_DEBUG */

   NEW_MAPPING_LOOP(md)
   {
     free_svalue(& k->val);
     free_svalue(& k->ind);
   }

-  free((char *) md);
+  free(md);
   GC_FREE_BLOCK(md);
 }

 PMOD_EXPORT void do_free_mapping(struct mapping *m)
 {
   if (m)
     inl_free_mapping(m);
 }

 /* This function is used to rehash a mapping without losing the internal
  * order in each hash chain. This is to prevent mappings from becoming
  * inefficient just after being rehashed.
  */
+/* Evil: Steal the svalues from the old md. */
 static void mapping_rehash_backwards_evil(struct mapping_data *md,
                                           struct keypair *from)
 {
   unsigned INT32 h;
   struct keypair *k, *prev = NULL, *next;

   if(!(k = from)) return;

   /* Reverse the hash chain. */
   while ((next = k->next)) {
     k->next = prev;
     prev = k;
     k = next;
   }
   k->next = prev;

-  from = k;
+  prev = k;

   /* Rehash and reverse the hash chain. */
-  while (from) {
+  while ((from = prev)) {
+    /* Reverse */
+    prev = from->next;
+    from->next = next;
+    next = from;
+
+    if (md->flags & MAPPING_WEAK) {
+
+      switch(md->flags & MAPPING_WEAK) {
+      default:
+        Pike_fatal("Instable mapping data flags.\n");
+        break;
+      case MAPPING_WEAK_INDICES:
+        if (!REFCOUNTED_TYPE(TYPEOF(from->ind)) ||
+            (*from->ind.u.refs > 1)) {
+          goto keep_keypair;
+        }
+        break;
+      case MAPPING_WEAK_VALUES:
+        if (!REFCOUNTED_TYPE(TYPEOF(from->val)) ||
+            (*from->val.u.refs > 1)) {
+          goto keep_keypair;
+        }
+        break;
+      case MAPPING_WEAK:
+        /* NB: Compat: Unreference counted values are counted
+         *     as multi-referenced here.
+         */
+        if ((!REFCOUNTED_TYPE(TYPEOF(from->ind)) ||
+             (*from->ind.u.refs > 1)) &&
+            (!REFCOUNTED_TYPE(TYPEOF(from->val)) ||
+             (*from->val.u.refs > 1))) {
+          goto keep_keypair;
+        }
+        break;
+      }
+
+      /* Free.
+       * Note that we don't need to free or unlink the keypair,
+       * since that will be done by the caller anyway. */
+      free_svalue(&from->ind);
+      free_svalue(&from->val);
+
+      continue;
+    }
+  keep_keypair:
+
     /* unlink */
     k=md->free_list;
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
 #ifdef PIKE_DEBUG
     if(!k) Pike_fatal("Error in rehash: not enough keypairs.\n");
 #endif
     md->free_list=k->next;
 #else /* PIKE_MAPPING_KEYPAIR_LOOP */
     md->free_list++;
 #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
pike.git/src/mapping.c:331:
     /* link */
     h=k->hval;
     h&=md->hashsize - 1;
     k->next=md->hash[h];
     md->hash[h]=k;

     /* update */
     md->ind_types |= 1<< (TYPEOF(k->ind));
     md->val_types |= 1<< (TYPEOF(k->val));
     md->size++;
-
-    /* Reverse */
-    prev = from->next;
-    from->next = next;
-    next = from;
-    from = prev;
+
   }
 }

+/* Good: Copy the svalues from the old md. */
 static void mapping_rehash_backwards_good(struct mapping_data *md,
                                           struct keypair *from)
 {
   unsigned INT32 h;
   struct keypair *k, *prev = NULL, *next;

   if(!(k = from)) return;

   /* Reverse the hash chain. */
   while ((next = k->next)) {
     k->next = prev;
     prev = k;
     k = next;
   }
   k->next = prev;

-  from = k;
+  prev = k;

   /* Rehash and reverse the hash chain. */
-  while (from) {
+  while ((from = prev)) {
+    /* Reverse */
+    prev = from->next;
+    from->next = next;
+    next = from;
+
+    if (md->flags & MAPPING_WEAK) {
+
+      switch(md->flags & MAPPING_WEAK) {
+      default:
+        Pike_fatal("Instable mapping data flags.\n");
+        break;
+      case MAPPING_WEAK_INDICES:
+        if (REFCOUNTED_TYPE(TYPEOF(from->ind)) &&
+            (*from->ind.u.refs > 1)) {
+          goto keep_keypair;
+        }
+        break;
+      case MAPPING_WEAK_VALUES:
+        if (REFCOUNTED_TYPE(TYPEOF(from->val)) &&
+            (*from->val.u.refs > 1)) {
+          goto keep_keypair;
+        }
+        break;
+      case MAPPING_WEAK:
+        /* NB: Compat: Unreference counted values are counted
+         *     as multi-referenced here.
+         */
+        if ((!REFCOUNTED_TYPE(TYPEOF(from->ind)) ||
+             (*from->ind.u.refs > 1)) &&
+            (!REFCOUNTED_TYPE(TYPEOF(from->val)) ||
+             (*from->val.u.refs > 1))) {
+          goto keep_keypair;
+        }
+        break;
+      }
+
+      /* Skip copying of this keypair.
+       *
+       * NB: We can't mess with the original md here,
+       *     since it might be in use by an iterator
+       *     or similar.
+       */
+
+      continue;
+    }
+  keep_keypair:
+
     /* unlink */
     k=md->free_list;
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
 #ifdef PIKE_DEBUG
     if(!k) Pike_fatal("Error in rehash: not enough keypairs.\n");
 #endif
     md->free_list=k->next;
 #else /* PIKE_MAPPING_KEYPAIR_LOOP */
     md->free_list++;
 #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
pike.git/src/mapping.c:386:
     /* link */
     h=k->hval;
     h&=md->hashsize - 1;
     k->next=md->hash[h];
     md->hash[h]=k;

     /* update */
     md->ind_types |= 1<< (TYPEOF(k->ind));
     md->val_types |= 1<< (TYPEOF(k->val));
     md->size++;
-
-    /* Reverse */
-    prev = from->next;
-    from->next = next;
-    next = from;
-    from = prev;
+
   }
 }

 /** This function re-allocates a mapping. It adjusts the max no. of
  * values that can be fitted into the mapping. It takes a bit of time to
  * run, but is used seldom enough not to degrade performance significantly.
  *
  * @param m the mapping to be rehashed
  * @param new_size new mapping size
  * @return the rehashed mapping
pike.git/src/mapping.c:433:
   if ((md->hashsize == new_size) && (md->refs == 1)) return m;

   init_mapping(m, new_size, md->flags);
   debug_malloc_touch(m);
   new_md=m->data;

   /* This operation is now 100% atomic - no locking required */
   if(md->refs>1)
   {
     /* good */
+    /* More than one reference to the md ==> We need to
+     * keep it afterwards.
+     */
     for(e=0;e<md->hashsize;e++)
       mapping_rehash_backwards_good(new_md, md->hash[e]);

     unlink_mapping_data(md);
   }else{
     /* evil */
+    /* We have the only reference to the md,
+     * so we can just copy the svalues without
+     * bothering about type checking.
+     */
     for(e=0;e<md->hashsize;e++)
       mapping_rehash_backwards_evil(new_md, md->hash[e]);

-    free((char *)md);
+    free(md);
     GC_FREE_BLOCK(md);
   }

 #ifdef PIKE_DEBUG
-  if(m->data->size != tmp)
-    Pike_fatal("Rehash failed, size not same any more.\n");
+  if((m->data->size != tmp) &&
+     ((m->data->size > tmp) || !(m->data->flags & MAPPING_WEAK)))
+    Pike_fatal("Rehash failed, size not same any more (%ld != %ld).\n",
+               (long)m->data->size, (long)tmp);
 #endif
 #ifdef MAPPING_SIZE_DEBUG
   m->debug_size = m->data->size;
 #endif

 #ifdef PIKE_DEBUG
   if(d_flag>1) check_mapping(m);
 #endif

   return m;
pike.git/src/mapping.c:485: Inside #if defined(PIKE_DEBUG)
 #ifdef PIKE_DEBUG
   if (Pike_in_gc > GC_PASS_PREPARE && Pike_in_gc < GC_PASS_ZAP_WEAK)
     Pike_fatal("Can't allocate a new mapping_data inside gc.\n");
 #endif

   debug_malloc_touch(md);

   size=MAPPING_DATA_SIZE(md->hashsize, md->num_keypairs);

   nmd=(struct mapping_data *)xalloc(size);
-  MEMCPY(nmd, md, size);
+  memcpy(nmd, md, size);
   off=((char *)nmd) - ((char *)md);

   RELOC(nmd->free_list);
   for(e=0;e<nmd->hashsize;e++) RELOC(nmd->hash[e]);

   keypairs=MD_KEYPAIRS(nmd, nmd->hashsize);
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
   for(e=0;e<nmd->num_keypairs;e++)
   {
     RELOC(keypairs[e].next);
pike.git/src/mapping.c:538:

 #define MAPPING_DATA_IN_USE(MD) ((MD)->refs != (MD)->hardlinks + 1)

 #define LOW_FIND(FUN, KEY, FOUND, NOT_FOUND) do { \
   md=m->data; \
   add_ref(md); \
   if(md->hashsize) \
   { \
     h=h2 & (md->hashsize - 1); \
     DO_IF_DEBUG( if(d_flag > 1) check_mapping_type_fields(m); ) \
-    if(md->ind_types & ((1 << TYPEOF(*key)) | BIT_OBJECT)) \
+    if(check_type_contains(md->ind_types, key)) \
     { \
       for(prev= md->hash + h;(k=*prev);prev=&k->next) \
       { \
         if(h2 == k->hval && FUN(& k->ind, KEY)) \
         { \
           FOUND; \
         } \
       } \
     } \
   } \
pike.git/src/mapping.c:561:


 #define LOW_FIND2(FUN, KEY, FOUND, NOT_FOUND) do { \
   struct keypair *k2; \
   md=m->data; \
   add_ref(md); \
   if(md->hashsize) \
   { \
     h=h2 & (md->hashsize-1); \
     DO_IF_DEBUG( if(d_flag > 1) check_mapping_type_fields(m); ) \
-    if(md->ind_types & ((1 << TYPEOF(*key)) | BIT_OBJECT)) \
+    if(check_type_contains(md->ind_types, key)) \
     { \
       k2=omd->hash[h2 & (omd->hashsize - 1)]; \
       prev= md->hash + h; \
       for(;(k=*prev) && k2;(prev=&k->next),(k2=k2->next)) \
         if(!(h2 == k->hval && is_identical(&k2->ind, &k->ind))) \
           break; \
       for(;(k=*prev);prev=&k->next) \
       { \
         if(FUN(& k->ind, KEY)) \
         { \
pike.git/src/mapping.c:1368:
 #endif

   m=allocate_mapping(MAP_SLOTS(ind->size));
   i=ITEM(ind);
   v=ITEM(val);
   for(e=0;e<ind->size;e++) low_mapping_insert(m, i++, v++, 2);

   return m;
 }

-#if 0
-PMOD_EXPORT struct mapping *copy_mapping(struct mapping *m)
-{
-  INT32 e;
-  struct mapping *n;
-  struct keypair *k;
-  struct mapping_data *md;
-
-  md=m->data;
-  n=allocate_mapping(MAP_SLOTS(md->size));
-
-  md->valrefs++;
-  add_ref(md);
-  NEW_MAPPING_LOOP(md) mapping_insert(n, &k->ind, &k->val);
-  md->valrefs--;
-  free_mapping_data(md);
-
-  return n;
-}
-#else
-
+
 /* deferred mapping copy! */
 PMOD_EXPORT struct mapping *copy_mapping(struct mapping *m)
 {
   struct mapping *n;

 #ifdef PIKE_DEBUG
   if(m->data->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 #endif

pike.git/src/mapping.c:1413: Inside #if defined(MAPPING_SIZE_DEBUG)
 #ifdef MAPPING_SIZE_DEBUG
   n->debug_size=n->data->size;
 #endif
   add_ref(n->data);
   n->data->valrefs++;
   n->data->hardlinks++;
   debug_malloc_touch(n->data);
   return n;
 }

-#endif
-
+
 /* copy_mapping() for when destructive operations are ok.
  *
  * Note: Destructive operations on the resulting mapping *will*
  *       affect eg NEW_MAPPING_LOOP() on the original mapping.
  */
 static struct mapping *destructive_copy_mapping(struct mapping *m)
 {
   if ((m->refs == 1) && !m->data->hardlinks &&
       !(m->data->flags & MAPPING_WEAK)) {
     /* We may perform destructive operations on the mapping. */
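Since the copy is deferred, copy_mapping() above is O(1): it only links a new mapping header to the existing mapping_data and bumps refs/valrefs/hardlinks; the keypairs are duplicated lazily the first time either mapping is modified. A rough C-level usage sketch (illustrative only; it assumes the ordinary svalue helpers from svalue.h):

    /* Illustrative sketch only -- not part of mapping.c. */
    struct mapping *orig = allocate_mapping(10);
    struct mapping *snap = copy_mapping(orig);   /* cheap: shares orig->data */

    struct svalue key, val;
    SET_SVAL(key, T_INT, NUMBER_NUMBER, integer, 1);
    SET_SVAL(val, T_INT, NUMBER_NUMBER, integer, 2);

    /* Modifying one of them unshares the mapping_data first (copy on
     * write), so the snapshot keeps the old contents. */
    mapping_insert(orig, &key, &val);

    free_mapping(snap);
    free_mapping(orig);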
pike.git/src/mapping.c:1660:
   SET_ONERROR(r1,do_free_array,ai);

   av=mapping_values(a);
   SET_ONERROR(r2,do_free_array,av);

   if(ai->size > 1)
   {
     zipper=get_set_order(ai);
     order_array(ai, zipper);
     order_array(av, zipper);
-    free((char *)zipper);
+    free(zipper);
   }

   bi=mapping_indices(b);
   SET_ONERROR(r3,do_free_array,bi);

   bv=mapping_values(b);
   SET_ONERROR(r4,do_free_array,bv);

   if(bi->size > 1)
   {
     zipper=get_set_order(bi);
     order_array(bi, zipper);
     order_array(bv, zipper);
-    free((char *)zipper);
+    free(zipper);
   }

   zipper=merge(ai,bi,op);

   ci=array_zip(ai,bi,zipper);

   UNSET_ONERROR(r4); free_array(bi);
   UNSET_ONERROR(r3); free_array(ai);

   cv=array_zip(av,bv,zipper);

   UNSET_ONERROR(r2); free_array(bv);
   UNSET_ONERROR(r1); free_array(av);

-  free((char *)zipper);
+  free(zipper);

   m=mkmapping(ci, cv);
   free_array(ci);
   free_array(cv);

   return m;
 }

 /* FIXME: What are the semantics for this function?
  * FIXME: It ought to be optimized just like the unordered variant.
pike.git/src/mapping.c:1720:

   ai=mapping_indices(a);
   SET_ONERROR(r1,do_free_array,ai);
   av=mapping_values(a);
   SET_ONERROR(r2,do_free_array,av);
   if(ai->size > 1)
   {
     zipper=get_set_order(ai);
     order_array(ai, zipper);
     order_array(av, zipper);
-    free((char *)zipper);
+    free(zipper);
   }

   switch (op) /* no elements from »b» may be selected */
   {
   case PIKE_ARRAY_OP_AND:
     zipper=merge(b,ai,op);
     ci=array_zip(b,ai,zipper); /* b must not be used */
     cv=array_zip(b,av,zipper); /* b must not be used */
     break;
   case PIKE_ARRAY_OP_SUB:
pike.git/src/mapping.c:1742:
     ci=array_zip(ai,b,zipper); /* b must not be used */
     cv=array_zip(av,b,zipper); /* b must not be used */
     break;
   default:
     Pike_fatal("merge_mapping_array on other than AND or SUB\n");
   }

   UNSET_ONERROR(r2); free_array(av);
   UNSET_ONERROR(r1); free_array(ai);

-  free((char *)zipper);
+  free(zipper);

   m=mkmapping(ci, cv);
   free_array(ci);
   free_array(cv);

   return m;
 }

 PMOD_EXPORT struct mapping *merge_mapping_array_unordered(struct mapping *a,
                                                           struct array *b, INT32 op)
pike.git/src/mapping.c:1774:
     SET_ONERROR(r1,do_free_array,b_temp);
     m=merge_mapping_array_ordered(a,b_temp,op);
     UNSET_ONERROR(r1); free_array(b_temp);
   }
   else
     m=merge_mapping_array_ordered(a,b,op);

   return m;
 }

+void o_append_mapping( INT32 args )
+{
+  struct svalue *lval = Pike_sp - args;
+  struct svalue *val = lval + 2;
+  int lvalue_type;
+
+#ifdef PIKE_DEBUG
+  if (args < 3) {
+    Pike_fatal("Too few arguments to o_append_mapping(): %d\n", args);
+  }
+#endif
+  args -= 3;
+  /* Note: val should always be a zero here! */
+  lvalue_type = lvalue_to_svalue_no_free(val, lval);
+
+  if ((TYPEOF(*val) == T_MAPPING) && (lvalue_type != PIKE_T_GET_SET)) {
+    struct mapping *m = val->u.mapping;
+    if( m->refs == 2 )
+    {
+      if ((TYPEOF(*lval) == T_OBJECT) &&
+          lval->u.object->prog &&
+          ((FIND_LFUN(lval->u.object->prog, LFUN_ASSIGN_INDEX) >= 0) ||
+           (FIND_LFUN(lval->u.object->prog, LFUN_ASSIGN_ARROW) >= 0))) {
+        /* There's a function controlling assignments in this object,
+         * so we can't alter the mapping in place.
+         */
+      } else {
+        int i;
+        /* fprintf( stderr, "map_refs==2\n" ); */
+        for( i=0; i<args; i+=2 )
+          low_mapping_insert( m, Pike_sp-(i+2), Pike_sp-(i+1), 2 );
+        stack_pop_n_elems_keep_top(2+args);
+        return;
+      }
+    }
+  }
+
+  f_aggregate_mapping(args);
+  f_add(2);
+  assign_lvalue(lval, val);
+  stack_pop_2_elems_keep_top();
+}
+
 /* NOTE: May perform destructive operations on either of the arguments
  * if it has only a single reference.
  */
 PMOD_EXPORT struct mapping *add_mappings(struct svalue *argp, INT32 args)
 {
   INT32 e,d;
   struct mapping *ret=0;
   struct keypair *k;

   for(e=d=0;d<args;d++)
pike.git/src/mapping.c:1865:
   if (a->data == b->data) return 1;

   /* If either is weak, they're different. */
   if ((a->data->flags | b->data->flags) & MAPPING_WEAK) return 0;

   check_mapping_for_destruct(a);
   check_mapping_for_destruct(b);

   if(m_sizeof(a) != m_sizeof(b)) return 0;

+  if (!check_type_overlaps(a->data->ind_types, b->data->ind_types) ||
+      !check_type_overlaps(a->data->val_types, b->data->val_types)) return 0;
+
   curr.pointer_a = a;
   curr.pointer_b = b;
   curr.next = p;

   for( ;p ;p=p->next)
     if(p->pointer_a == (void *)a && p->pointer_b == (void *)b)
       return 1;

   md=a->data;
   md->valrefs++;
pike.git/src/mapping.c:2381: Inside #if defined(PIKE_DEBUG)
 void check_all_mappings(void)
 {
   struct mapping *m;
   for(m=first_mapping;m;m=m->next)
     check_mapping(m);
 }
 #endif

 static void visit_mapping_data (struct mapping_data *md, int action,
-                                struct mapping *m)
+                                void *extra)
 {
-  switch (action) {
+  visit_enter(md, T_MAPPING_DATA, extra);
+  switch (action & VISIT_MODE_MASK) {
 #ifdef PIKE_DEBUG
     default:
       Pike_fatal ("Unknown visit action %d.\n", action);
     case VISIT_NORMAL:
     case VISIT_COMPLEX_ONLY:
       break;
 #endif
     case VISIT_COUNT_BYTES:
       mc_counted_bytes += MAPPING_DATA_SIZE (md->hashsize, md->num_keypairs);
       break;
   }

-  if ((md->ind_types | md->val_types) &
+  if (!(action & VISIT_NO_REFS) &&
+      (md->ind_types | md->val_types) &
       (action & VISIT_COMPLEX_ONLY ? BIT_COMPLEX : BIT_REF_TYPES)) {
     int ind_ref_type =
       md->flags & MAPPING_WEAK_INDICES ? REF_TYPE_WEAK : REF_TYPE_NORMAL;
     int val_ref_type =
       md->flags & MAPPING_WEAK_VALUES ? REF_TYPE_WEAK : REF_TYPE_NORMAL;
     INT32 e;
     struct keypair *k;
     NEW_MAPPING_LOOP (md) {
-      visit_svalue (&k->ind, ind_ref_type);
-      visit_svalue (&k->val, val_ref_type);
+      visit_svalue (&k->ind, ind_ref_type, extra);
+      visit_svalue (&k->val, val_ref_type, extra);
     }
   }
+  visit_leave(md, T_MAPPING_DATA, extra);
 }

-PMOD_EXPORT void visit_mapping (struct mapping *m, int action)
+PMOD_EXPORT void visit_mapping (struct mapping *m, int action, void *extra)
 {
-  switch (action) {
+  visit_enter(m, T_MAPPING, extra);
+  switch (action & VISIT_MODE_MASK) {
 #ifdef PIKE_DEBUG
     default:
       Pike_fatal ("Unknown visit action %d.\n", action);
     case VISIT_NORMAL:
     case VISIT_COMPLEX_ONLY:
       break;
 #endif
     case VISIT_COUNT_BYTES:
       mc_counted_bytes += sizeof (struct mapping);
       break;
   }

   visit_ref (m->data, REF_TYPE_INTERNAL,
-             (visit_thing_fn *) &visit_mapping_data, m);
+             (visit_thing_fn *) &visit_mapping_data, extra);
+  visit_leave(m, T_MAPPING, extra);
 }

 #ifdef MAPPING_SIZE_DEBUG
 #define DO_IF_MAPPING_SIZE_DEBUG(x) x
 #else
 #define DO_IF_MAPPING_SIZE_DEBUG(x)
 #endif

 #define GC_RECURSE_MD_IN_USE(MD, RECURSE_FN, IND_TYPES, VAL_TYPES) do { \
   INT32 e; \
pike.git/src/mapping.c:2551:
     gc_free_svalue(&k->val); \
   else \
     N_REC(&k->val, 1); \
 } while (0)

 #define GC_REC_KP_VAL(REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
   if ((REMOVE = N_TST(&k->ind))) /* Don't recurse now. */ \
     gc_free_svalue(&k->val); \
   else if ((REMOVE = W_REC(&k->val, 1))) \
     gc_free_svalue(&k->ind); \
-  else \
-    N_REC(&k->ind, 1); /* Now we can recurse the index. */ \
+  else if (N_REC(&k->ind, 1)) /* Now we can recurse the index. */ \
+    Pike_fatal("Mapping: %s and %s don't agree.\n", \
+               TOSTR(N_TST), TOSTR(N_REC)); \
 } while (0)

 #define GC_REC_KP_BOTH(REMOVE, N_REC, W_REC, N_TST, W_TST) do { \
   if ((REMOVE = W_TST(&k->ind))) /* Don't recurse now. */ \
     gc_free_svalue(&k->val); \
   else if ((REMOVE = W_REC(&k->val, 1))) \
     gc_free_svalue(&k->ind); \
-  else \
-    W_REC(&k->ind, 1); /* Now we can recurse the index. */ \
+  else if (W_REC(&k->ind, 1)) /* Now we can recurse the index. */ \
+    Pike_fatal("Mapping: %s and %s don't agree.\n", \
+               TOSTR(W_TST), TOSTR(W_REC)); \
 } while (0)

 void gc_mark_mapping_as_referenced(struct mapping *m)
 {
 #ifdef PIKE_DEBUG
   if(m->data->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 #endif
   debug_malloc_touch(m);
   debug_malloc_touch(m->data);