pike.git / src / mapping.c

Context lines:

pike.git/src/mapping.c:14:
  #include "pike_error.h"
  #include "pike_memory.h"
  #include "pike_types.h"
  #include "buffer.h"
  #include "interpret.h"
  #include "las.h"
  #include "gc.h"
  #include "stralloc.h"
  #include "block_allocator.h"
  #include "opcodes.h"
- #include "stuff.h"
+ #include "pike_cpulib.h"
 
  /* Average number of keypairs per slot when allocating. */
  #define AVG_LINK_LENGTH 4
 
  /* Minimum number of elements in a hashtable is half of the slots. */
  #define MIN_LINK_LENGTH_NUMERATOR 1
  #define MIN_LINK_LENGTH_DENOMINATOR 2
 
  /* Number of keypairs to allocate for a given size. */
  #define MAP_SLOTS(X) ((X)?((X)+((X)>>4)+8):0)
pike.git/src/mapping.c:172:
  static struct mapping_data weak_ind_empty_data =
    { PIKE_CONSTANT_MEMOBJ_INIT(1, T_MAPPING_DATA), 1, 0,0,0,0,0,0, MAPPING_WEAK_INDICES,
      IF_ELSE_KEYPAIR_LOOP((struct keypair *)&weak_ind_empty_data.hash, 0), {0}};
  static struct mapping_data weak_val_empty_data =
    { PIKE_CONSTANT_MEMOBJ_INIT(1, T_MAPPING_DATA), 1, 0,0,0,0,0,0, MAPPING_WEAK_VALUES,
      IF_ELSE_KEYPAIR_LOOP((struct keypair *)&weak_val_empty_data.hash, 0), {0}};
  static struct mapping_data weak_both_empty_data =
    { PIKE_CONSTANT_MEMOBJ_INIT(1, T_MAPPING_DATA), 1, 0,0,0,0,0,0, MAPPING_WEAK,
      IF_ELSE_KEYPAIR_LOOP((struct keypair *)&weak_both_empty_data.hash, 0), {0}};
 
+ /*
+  * This rounds an integer up to the next power of two. For x a power
+  * of two, this will just return the same again.
+  */
+ static unsigned INT32 find_next_power(unsigned INT32 x)
+ {
+   if( x == 0 ) return 1;
+   return 1<<(my_log2(x-1)+1);
+ }
+
  /** This function allocates the hash table and svalue space for a mapping
   * struct. The size is the max number of indices that can fit in the
   * allocated space.
   */
  static void init_mapping(struct mapping *m,
                           INT32 hashsize,
                           INT16 flags)
  {
    struct mapping_data *md;
    ptrdiff_t e;
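For reference, a minimal standalone sketch of the round-up-to-next-power-of-two computation that the new find_next_power() helper performs, written here with a plain shift loop instead of my_log2() (which the file now pulls in via pike_cpulib.h); the name next_power_of_two() and the loop are illustrative only, not Pike code:

#include <stdint.h>

/* Round x up to a power of two; powers of two map to themselves and
 * 0 maps to 1, as described in the comment on find_next_power(). */
static uint32_t next_power_of_two(uint32_t x)
{
  uint32_t p = 1;
  while (p < x)
    p <<= 1;
  return p;
}

/* Examples: next_power_of_two(0) == 1, next_power_of_two(5) == 8,
 *           next_power_of_two(8) == 8, next_power_of_two(9) == 16. */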
pike.git/src/mapping.c:234:
     md->num_keypairs=size;
   }else{
     switch (flags & MAPPING_WEAK) {
     case 0: md = &empty_data; break;
     case MAPPING_WEAK_INDICES: md = &weak_ind_empty_data; break;
     case MAPPING_WEAK_VALUES: md = &weak_val_empty_data; break;
     default: md = &weak_both_empty_data; break;
     }
   }
   add_ref(md);
+  gc_init_marker(md);
   m->data=md;
 #ifdef MAPPING_SIZE_DEBUG
   m->debug_size = md->size;
 #endif
 }
 
 static struct mapping *allocate_mapping_no_init(void)
 {
   struct mapping *m=alloc_mapping();
   GC_ALLOC(m);
pike.git/src/mapping.c:533: Inside #if defined(PIKE_DEBUG)
 #ifdef PIKE_DEBUG
   if(md->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 
   if(d_flag>1) check_mapping(m);
 #endif
 
   /* NB: Code duplication from init_mapping(). */
   if (hashsize & (hashsize - 1))
     hashsize = find_next_power(hashsize);
-  if ((md->hashsize == hashsize) && (md->refs == 1)) return m;
+  if ((md->hashsize == hashsize) && (md->refs == 1)
+      /* FIXME: Paranoia check below; is this needed? */
+      && !MD_FULLP(md))
+    return m;
 
   init_mapping(m, hashsize, md->flags);
   debug_malloc_touch(m);
   new_md=m->data;
 
   /* This operation is now 100% atomic - no locking required */
   if(md->refs>1)
   {
     /* good */
     /* More than one reference to the md ==> We need to
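MD_FULLP() itself is not visible in these context lines. Judging from the inline tests it replaces further down (in low_mapping_insert() and mapping_get_item_ptr()), it presumably amounts to a check that the keypair pool is exhausted, along these lines (assumed shape only; the real definition lives elsewhere in this commit):

/* Assumed definition, inferred from the removed conditions below. */
#ifndef PIKE_MAPPING_KEYPAIR_LOOP
#define MD_FULLP(MD)   (!(MD)->free_list)
#else  /* PIKE_MAPPING_KEYPAIR_LOOP */
#define MD_FULLP(MD)   ((MD)->size >= (MD)->num_keypairs)
#endif /* !PIKE_MAPPING_KEYPAIR_LOOP */

With that reading, the new early return in rehash() only triggers when the table keeps its size, is unshared, and still has free keypairs.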
pike.git/src/mapping.c:664: Inside #if defined(PIKE_DEBUG)
   if (Pike_in_gc > GC_PASS_PREPARE && Pike_in_gc < GC_PASS_ZAP_WEAK)
     Pike_fatal("Can't allocate a new mapping_data inside gc.\n");
 #endif
 
   debug_malloc_touch(md);
 
   size=MAPPING_DATA_SIZE(md->hashsize, md->num_keypairs);
 
   nmd=(struct mapping_data *)xalloc(size);
   memcpy(nmd, md, size);
+  gc_init_marker(nmd);
   off=((char *)nmd) - ((char *)md);
 
   RELOC(nmd->free_list);
   for(e=0;e<nmd->hashsize;e++) RELOC(nmd->hash[e]);
 
   keypairs=MD_KEYPAIRS(nmd, nmd->hashsize);
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
   for(e=0;e<nmd->num_keypairs;e++)
   {
     RELOC(keypairs[e].next);
pike.git/src/mapping.c:711:
     md->hardlinks--;
     md->valrefs--;
   }
   sub_ref(md);
 
   return nmd;
 }
 
 #define MAPPING_DATA_IN_USE(MD) ((MD)->refs != (MD)->hardlinks + 1)
 
+ /**
+  * void LOW_FIND(int (*FUN)(const struct svalue *IND, const struct svalue *KEY),
+  *               const struct svalue *KEY,
+  *               CODE_IF_FOUND,
+  *               CODE_IF_NOT_FOUND)
+  *
+  * Needs local variables:
+  *   struct mapping *m;
+  *   struct mapping_data *md;
+  *   size_t h, h2;
+  *   struct svalue *key;
+  *   struct keypair *k, **prev;
+  *
+  * CODE_IF_FOUND must end with a return, goto or longjmp to
+  * avoid CODE_IF_NOT_FOUND being called afterwards, or for
+  * potential multiple matches.
+  *
+  * On entry:
+  *   m is the mapping to search.
+  *   key === KEY.
+  *   h2 = hash_svalue(key).
+  *
+  * Internals:
+  *   md = m->data.
+  *   md gets an extra ref.
+  *
+  * When CODE_IF_FOUND gets executed k points to the matching keypair.
+  */
 #define LOW_FIND(FUN, KEY, FOUND, NOT_FOUND) do {               \
   md=m->data;                                                   \
   add_ref(md);                                                  \
   if(md->hashsize)                                              \
   { \
     h=h2 & (md->hashsize - 1);                                  \
     DO_IF_DEBUG( if(d_flag > 1) check_mapping_type_fields(m); ) \
     if(check_type_contains(md->ind_types, key))                 \
     { \
       for(prev= md->hash + h;(k=*prev);prev=&k->next)           \
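As a standalone illustration of the pattern the macro wraps (mask the hash value into a bucket, then walk that bucket's chain calling the comparison function), here is a simplified sketch with stand-in types; it leaves out the refcounting, the ind_types filtering and the debug hooks, and is not the real Pike data layout:

#include <stddef.h>

struct kp { struct kp *next; int ind; int val; };

struct table {
  size_t hashsize;     /* always zero or a power of two */
  struct kp **hash;    /* hashsize buckets of keypair chains */
};

/* Return the matching keypair or NULL; the same chain walk LOW_FIND()
 * performs, with fun standing in for the FUN comparison callback. */
static struct kp *low_find(struct table *t, int key, size_t h2,
                           int (*fun)(int ind, int key))
{
  struct kp *k;
  if (!t->hashsize)
    return NULL;
  for (k = t->hash[h2 & (t->hashsize - 1)]; k; k = k->next)
    if (fun(k->ind, key))
      return k;
  return NULL;
}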
pike.git/src/mapping.c:801:
     LOW_RELOC(prev); \
     md=m->data; \
 }while(0)
 
 #define PREPARE_FOR_DATA_CHANGE2() \
   if(md->valrefs) COPYMAP2()
 
 #define PREPARE_FOR_INDEX_CHANGE2() \
   if(md->refs>1) COPYMAP2()
 
+ #if 0
 #define PROPAGATE() do {                        \
    if(md->refs==1)                              \
    {                                            \
      h=h2 & (md->hashsize - 1);                 \
      *prev=k->next;                             \
      k->next=md->hash[h];                       \
      md->hash[h]=k;                             \
    }                                            \
  }while(0)
-
+ #else
+ #define PROPAGATE()
+ #endif
 
 
 /* Assumes md is locked */
 #define COPYMAP() do {                          \
   ptrdiff_t off; \
   m->data=copy_mapping_data(m->data); \
   debug_malloc_touch(m->data); \
   off=((char *)m->data)-((char *)md); \
   LOW_RELOC(k); \
   free_mapping_data(md); \
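The body now kept under #if 0 is a move-to-front of the matching keypair within its hash chain, attempted only when md->refs == 1 (the data block is private); with the diff applied, PROPAGATE() expands to nothing and chains are no longer reordered on lookup. A minimal standalone sketch of that list operation, with illustrative types:

struct node { struct node *next; };

/* Unlink k (reached through *prev) and relink it first in the chain, so
 * an immediately repeated lookup of the same key hits it after a single
 * comparison. This also works when k is already the head. */
static void move_to_front(struct node **head, struct node **prev,
                          struct node *k)
{
  *prev = k->next;
  k->next = *head;
  *head = k;
}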
pike.git/src/mapping.c:891:
 }
 
 PMOD_EXPORT void low_mapping_insert(struct mapping *m,
                                     const struct svalue *key,
                                     const struct svalue *val,
                                     int overwrite)
 {
   size_t h,h2;
   struct keypair *k, **prev;
   struct mapping_data *md, *omd;
+  int grow_md;
+  int refs;
 
 #ifdef PIKE_DEBUG
   if(m->data->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 #endif
 
   h2=hash_svalue(key);
 
 #ifdef PIKE_DEBUG
   if(d_flag>1) check_mapping(m);
pike.git/src/mapping.c:931:
  mi_do_nothing:
   free_mapping_data(md);
   return;
 
  mi_set_value:
 #ifdef PIKE_DEBUG
   if(m->data != md)
     Pike_fatal("Wrong dataset in mapping_insert!\n");
   if(d_flag>1) check_mapping(m);
 #endif
-  free_mapping_data(md);
+  /* NB: We know that md has a reference from the mapping
+   * in addition to our reference.
+   *
+   * The use of sub_ref() silences warnings from Coverity, as well as
+   * on the off chance of a reference counting error avoids accessing
+   * freed memory.
+   */
+  refs = sub_ref(md); /* free_mapping_data(md); */
+  assert(refs);
+
   if(!overwrite) return;
   PREPARE_FOR_DATA_CHANGE2();
   PROPAGATE(); /* propagate after preparing */
   md->val_types |= 1 << TYPEOF(*val);
   if (overwrite == 2 && TYPEOF(*key) == T_OBJECT)
     /* Should replace the index too. It's only for objects that it's
      * possible to tell the difference. */
     assign_svalue (&k->ind, key);
   assign_svalue(& k->val, val);
 #ifdef PIKE_DEBUG
   if(d_flag>1) check_mapping(m);
 #endif
   return;
 
  mi_insert:
 #ifdef PIKE_DEBUG
   if(m->data != md)
     Pike_fatal("Wrong dataset in mapping_insert!\n");
   if(d_flag>1) check_mapping(m);
 #endif
-  free_mapping_data(md);
+  /* NB: We know that md has a reference from the mapping
+   * in addition to our reference.
+   *
+   * The use of sub_ref() silences warnings from Coverity, as well as
+   * on the off chance of a reference counting error avoids accessing
+   * freed memory.
+   */
+  refs = sub_ref(md); /* free_mapping_data(md); */
+  assert(refs);
+
+  grow_md = MD_FULLP(md);
+
   /* We do a re-hash here instead of copying the mapping. */
-  if(
- #ifndef PIKE_MAPPING_KEYPAIR_LOOP
-     (!md->free_list) ||
- #else /* PIKE_MAPPING_KEYPAIR_LOOP */
-     (md->size >= md->num_keypairs) ||
- #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
-     md->refs>1)
+  if(grow_md || md->refs>1)
   {
     debug_malloc_touch(m);
-    rehash(m, md->hashsize?(md->hashsize<<1):AVG_LINK_LENGTH);
+    rehash(m, md->hashsize ? md->hashsize << grow_md : AVG_LINK_LENGTH);
     md=m->data;
   }
   h=h2 & ( md->hashsize - 1);
 
   /* no need to lock here since we are not calling is_eq - Hubbe */
 
   k=md->free_list;
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
   md->free_list=k->next;
 #else /* PIKE_MAPPING_KEYPAIR_LOOP */
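Since grow_md is the result of MD_FULLP(md) and is used as a shift count, it is evidently 0 or 1, so the new rehash call either keeps the current table size (a rehash into a private copy when md->refs > 1) or doubles it (when the keypair pool is full). A worked illustration of the size expression, assuming the AVG_LINK_LENGTH of 4 defined at the top of the file:

#define AVG_LINK_LENGTH 4

/* Mirrors: md->hashsize ? md->hashsize << grow_md : AVG_LINK_LENGTH */
static unsigned int new_hashsize(unsigned int hashsize, int grow_md)
{
  return hashsize ? hashsize << grow_md : AVG_LINK_LENGTH;
}

/* new_hashsize(16, 0) == 16  -- md shared (refs > 1): same size, fresh copy
 * new_hashsize(16, 1) == 32  -- keypair pool full: double the table
 * new_hashsize(0, 1)  ==  4  -- previously empty mapping                  */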
pike.git/src/mapping.c:1011:
 /* Inline the above in this file. */
 #define mapping_insert(M, KEY, VAL) low_mapping_insert ((M), (KEY), (VAL), 1)
 
 PMOD_EXPORT union anything *mapping_get_item_ptr(struct mapping *m,
                                                  const struct svalue *key,
                                                  TYPE_T t)
 {
   size_t h, h2;
   struct keypair *k, **prev;
   struct mapping_data *md,*omd;
+  int grow_md;
+  int refs;
 
 #ifdef PIKE_DEBUG
   if(m->data->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 
   if(d_flag>1) check_mapping(m);
 
   debug_malloc_touch(m);
   debug_malloc_touch(m->data);
 #endif
pike.git/src/mapping.c:1055:
   free_mapping_data(md);
   return 0;
 
  mg_set_value:
 #ifdef PIKE_DEBUG
   if(m->data != md)
     Pike_fatal("Wrong dataset in mapping_get_item_ptr!\n");
   if(d_flag)
     check_mapping(m);
 #endif
-  free_mapping_data(md);
+  /* NB: We know that md has a reference from the mapping
+   * in addition to our reference.
+   *
+   * The use of sub_ref() silences warnings from Coverity, as well as
+   * on the off chance of a reference counting error avoids accessing
+   * freed memory.
+   */
+  refs = sub_ref(md); /* free_mapping_data(md); */
+  assert(refs);
+
   if(TYPEOF(k->val) == t)
   {
     PREPARE_FOR_DATA_CHANGE2();
     PROPAGATE(); /* prepare then propagate */
 
     return & ( k->val.u );
   }
 
   return 0;
 
  mg_insert:
 #ifdef PIKE_DEBUG
   if(m->data != md)
     Pike_fatal("Wrong dataset in mapping_get_item_ptr!\n");
   if(d_flag)
     check_mapping(m);
 #endif
-  free_mapping_data(md);
+  /* NB: We know that md has a reference from the mapping
+   * in addition to our reference.
+   *
+   * The use of sub_ref() silences warnings from Coverity, as well as
+   * on the off chance of a reference counting error avoids accessing
+   * freed memory.
+   */
+  refs = sub_ref(md); /* free_mapping_data(md); */
+  assert(refs);
 
   if(t != T_INT) return 0;
 
-
+  grow_md = MD_FULLP(md);
+
   /* no need to call PREPARE_* because we re-hash instead */
-  if(
- #ifndef PIKE_MAPPING_KEYPAIR_LOOP
-     !(md->free_list) ||
- #else /* PIKE_MAPPING_KEYPAIR_LOOP */
-     (md->size >= md->num_keypairs) ||
- #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
-     md->refs>1)
+  if(grow_md || md->refs>1)
   {
     debug_malloc_touch(m);
-    rehash(m, md->hashsize?(md->hashsize<<1):AVG_LINK_LENGTH);
+    rehash(m, md->hashsize ? md->hashsize << grow_md : AVG_LINK_LENGTH);
     md=m->data;
   }
   h=h2 & ( md->hashsize - 1);
 
   k=md->free_list;
 #ifndef PIKE_MAPPING_KEYPAIR_LOOP
   md->free_list=k->next;
 #else /* PIKE_MAPPING_KEYPAIR_LOOP */
   md->free_list++;
 #endif /* !PIKE_MAPPING_KEYPAIR_LOOP */
pike.git/src/mapping.c:1309:
   FIND();
   if(k)
   {
     PROPAGATE();
     return &k->val;
   }
   return 0;
 }
 
 PMOD_EXPORT struct svalue *low_mapping_string_lookup(struct mapping *m,
-                                                     struct pike_string *p)
+                                                     const struct pike_string *p)
 {
   struct svalue tmp;
-  SET_SVAL(tmp, T_STRING, 0, string, p);
+  /* Expanded SET_SVALUE macro to silence discard const warning. */
+  struct svalue *ptr = &tmp;
+  ptr->u.string = (struct pike_string *)p;
+  SET_SVAL_TYPE_SUBTYPE(*ptr, T_STRING, 0);
   return low_mapping_lookup(m, &tmp);
 }
 
 PMOD_EXPORT void mapping_string_insert(struct mapping *m,
                                        struct pike_string *p,
                                        const struct svalue *val)
 {
   struct svalue tmp;
   SET_SVAL(tmp, T_STRING, 0, string, p);
   mapping_insert(m, &tmp, val);
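A hedged usage sketch of the const-qualified entry point: callers that only hold a read-only struct pike_string reference can now look it up without casting away const. The helper name below is made up for illustration; low_mapping_lookup() (and hence this wrapper) returns NULL when the key is absent:

/* Hypothetical caller, assuming a valid mapping and a shared string. */
static int mapping_has_string_key(struct mapping *m,
                                  const struct pike_string *name)
{
  return low_mapping_string_lookup(m, name) != NULL;
}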
pike.git/src/mapping.c:1558:
 #endif
 
   m=allocate_mapping(MAP_SLOTS(ind->size));
   i=ITEM(ind);
   v=ITEM(val);
   for(e=0;e<ind->size;e++) low_mapping_insert(m, i++, v++, 2);
 
   return m;
 }
 
+ PMOD_EXPORT void clear_mapping(struct mapping *m)
+ {
+   struct mapping_data *md = m->data;
+   int flags = md->flags;
+
+   if (!md->size) return;
+   unlink_mapping_data(md);
+
+   switch (flags & MAPPING_WEAK) {
+   case 0: md = &empty_data; break;
+   case MAPPING_WEAK_INDICES: md = &weak_ind_empty_data; break;
+   case MAPPING_WEAK_VALUES: md = &weak_val_empty_data; break;
+   default: md = &weak_both_empty_data; break;
+   }
+
+   /* FIXME: Increment hardlinks & valrefs?
+    * NB: init_mapping() doesn't do this.
+    */
+   add_ref(md);
+   /* gc_init_marker(md); */
+
+   m->data = md;
+ #ifdef MAPPING_SIZE_DEBUG
+   m->debug_size = md->size;
+ #endif
+ }
+
 /* deferred mapping copy! */
 PMOD_EXPORT struct mapping *copy_mapping(struct mapping *m)
 {
   struct mapping *n;
 
 #ifdef PIKE_DEBUG
   if(m->data->refs <=0)
     Pike_fatal("Zero refs in mapping->data\n");
 #endif
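A hedged usage sketch of the new clear_mapping(): it empties a mapping in place, so every holder of the same struct mapping pointer sees the cleared contents, and the weak flags survive because the replacement data block is chosen from the matching static *_empty_data. The wrapper below is illustrative only:

/* Hypothetical helper: drop all entries from a cache mapping without
 * reallocating the struct mapping itself. Afterwards m->data points at
 * the shared static empty block selected by the old MAPPING_WEAK flags,
 * carrying the reference added by clear_mapping(). */
static void flush_cache(struct mapping *m)
{
  clear_mapping(m);
}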