pike.git / src / mapping.c


pike.git/src/mapping.c:1:
   /*\
   ||| This file a part of Pike, and is copyright by Fredrik Hubinette
   ||| Pike is distributed as GPL (General Public License)
   ||| See the files COPYING and DISCLAIMER for more information.
   \*/
   /**/
   #include "global.h"
- RCSID("$Id: mapping.c,v 1.56 2000/02/01 06:25:06 hubbe Exp $");
+ RCSID("$Id: mapping.c,v 1.57 2000/02/01 23:51:47 hubbe Exp $");
   #include "main.h"
   #include "object.h"
   #include "mapping.h"
   #include "svalue.h"
   #include "array.h"
   #include "pike_macros.h"
   #include "language.h"
   #include "error.h"
   #include "pike_memory.h"
   #include "dynamic_buffer.h"
pike.git/src/mapping.c:195:
    free_svalue(& k->val);
    free_svalue(& k->ind);
   }

   free((char *) md);
  }

  /* This function is used to rehash a mapping without losing the internal
   * order in each hash chain. This is to prevent mappings from becoming
   * inefficient just after being rehashed.
-  * (wonder if this can't be done faster somehow?)
+ 
   */
- static void mapping_rehash_backwards(struct mapping *m,
-  struct keypair *p)
+ static void mapping_rehash_backwards_evil(struct mapping_data *md,
+  struct keypair *from)
  {
   unsigned INT32 h;
   struct keypair *tmp;
- 
+  struct keypair *k;

-  if(!p) return;
-  mapping_rehash_backwards(m,p->next);
+  if(!from) return;
+  mapping_rehash_backwards_evil(md,from->next);

-  /* We insert without overwriting
-  * however, this could still cause
-  * problems. (for instance if someone does an
-  * m_delete on an element that is not there yet..
-  */
-  low_mapping_insert(m, &p->ind, &p->val,0);
+  /* unlink */
+  k=md->free_list;
+ #ifdef PIKE_DEBUG
+  if(!k) fatal("Error in rehash: not enough keypairs.\n");
+ #endif
+  md->free_list=k->next;
+ 
+  /* initialize */
+  *k=*from;
+ 
+  /* link */
+  h=k->hval;
+  h%=md->hashsize;
+  k->next=md->hash[h];
+  md->hash[h]=k;
+ 
+  /* update */
+  md->ind_types |= 1<< (k->ind.type);
+  md->val_types |= 1<< (k->val.type);
+  md->size++;
  }

- 
+ static void mapping_rehash_backwards_good(struct mapping_data *md,
+  struct keypair *from)
+ {
+  unsigned INT32 h;
+  struct keypair *tmp;
+  struct keypair *k;
+ 
+  if(!from) return;
+  mapping_rehash_backwards_good(md,from->next);
+ 
+  /* unlink */
+  k=md->free_list;
+ #ifdef PIKE_DEBUG
+  if(!k) fatal("Error in rehash: not enough keypairs.\n");
+ #endif
+  md->free_list=k->next;
+ 
+  /* initialize */
+  k->hval=from->hval;
+  assign_svalue_no_free(&k->ind, &from->ind);
+  assign_svalue_no_free(&k->val, &from->val);
+ 
+  /* link */
+  h=k->hval;
+  h%=md->hashsize;
+  k->next=md->hash[h];
+  md->hash[h]=k;
+ 
+  /* update */
+  md->ind_types |= 1<< (k->ind.type);
+  md->val_types |= 1<< (k->val.type);
+  md->size++;
+ }
+ 
  /* This function re-allocates a mapping. It adjusts the max no. of
   * values that can be fitted into the mapping. It takes a bit of time to
   * run, but is used seldom enough not to degrade performance significantly.
   */
  static struct mapping *rehash(struct mapping *m, int new_size)
  {
-  struct mapping_data *md;
+  struct mapping_data *md, *new_md;
  #ifdef PIKE_DEBUG
   INT32 tmp=m->data->size;
  #endif
   INT32 e;

   md=m->data;
  #ifdef PIKE_DEBUG
   if(md->refs <=0)
     fatal("Zero refs in mapping->data\n");

   if(d_flag>1) check_mapping(m);
  #endif

   init_mapping(m, new_size);
   debug_malloc_touch(m);
- 
+  new_md=m->data;

-  /* No need to do add_ref(md) because the ref
-  * from 'm' is not freed yet
-  */
-  md->valrefs++;
+  /* This operation is now 100% atomic - no locking required */
+  if(md->refs>1)
+  {
+  /* good */
+  for(e=0;e<md->hashsize;e++)
+  mapping_rehash_backwards_good(new_md, md->hash[e]);

- 
+  if(md->hardlinks) md->hardlinks--;
+  free_mapping_data(md);
+  }else{
+  /* evil */
   for(e=0;e<md->hashsize;e++)
-  mapping_rehash_backwards(m, md->hash[e]);
+  mapping_rehash_backwards_evil(new_md, md->hash[e]);

-  md->valrefs--;
+  free((char *)md);
+  }

  #ifdef PIKE_DEBUG
   if(m->data->size != tmp)
     fatal("Rehash failed, size not same any more.\n");
  #endif

-  free_mapping_data(md);
- 
+ 
  #ifdef PIKE_DEBUG
   if(d_flag>1) check_mapping(m);
  #endif

   return m;
  }

  /*
   * No need for super efficiency, should not be called very often
   * -Hubbe
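
The rewritten rehash() above walks every old hash chain with one of the two backwards helpers, which recurse to the end of the chain first and only move an element on the way back out; since each moved element is pushed onto the head of its new bucket, the original order within a chain is preserved. A minimal stand-alone sketch of that order-preserving trick, using made-up node and bucket types rather than the mapping_data structures from mapping.c:

/* Hypothetical illustration only - not part of this commit. */
#include <stdio.h>

struct node { int key; struct node *next; };

/* Stand-in for k->hval % md->hashsize in the real code. */
static unsigned bucket_of(int key, unsigned hashsize)
{
  return (unsigned)key % hashsize;
}

static void rehash_backwards(struct node **newhash, unsigned hashsize,
                             struct node *from)
{
  unsigned h;
  if(!from) return;
  rehash_backwards(newhash, hashsize, from->next); /* tail first */

  /* then push this node onto the head of its new bucket */
  h = bucket_of(from->key, hashsize);
  from->next = newhash[h];
  newhash[h] = from;
}

int main(void)
{
  /* chain 2 -> 6 -> 10; all three keys collide in bucket 2 of 4 */
  struct node n3 = { 10, NULL }, n2 = { 6, &n3 }, n1 = { 2, &n2 };
  struct node *newhash[4] = { NULL, NULL, NULL, NULL };
  struct node *k;

  rehash_backwards(newhash, 4, &n1);

  for(k = newhash[2]; k; k = k->next)  /* prints 2 6 10: order kept */
    printf("%d\n", k->key);
  return 0;
}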
pike.git/src/mapping.c:321:
  #define LOW_FIND(FUN, KEY, FOUND, NOT_FOUND) do { \
   md=m->data; \
   add_ref(md); \
   if(md->hashsize) \
   { \
   h=h2 % md->hashsize; \
   if(md->ind_types & (1 << key->type)) \
   { \
   for(prev= md->hash + h;(k=*prev);prev=&k->next) \
   { \
-  if(FUN(& k->ind, KEY)) \
+  if(h2 == k->hval && FUN(& k->ind, KEY)) \
   { \
   FOUND; \
   } \
   } \
   } \
   } \
   NOT_FOUND; \
  }while(0)
pike.git/src/mapping.c:344:
   md=m->data; \
   add_ref(md); \
   if(md->hashsize) \
   { \
   h=h2 % md->hashsize; \
   if(md->ind_types & (1 << key->type)) \
   { \
   k2=omd->hash[h2 % md->hashsize]; \
   prev= md->hash + h; \
   for(;(k=*prev) && k2;(prev=&k->next),(k2=k2->next)) \
-  if(!is_identical(&k2->ind, &k->ind)) \
+  if(!(h2 == k->hval && is_identical(&k2->ind, &k->ind))) \
   break; \
   for(;(k=*prev);prev=&k->next) \
   { \
   if(FUN(& k->ind, KEY)) \
   { \
   FOUND; \
   } \
   } \
   } \
   } \
pike.git/src/mapping.c:546:
   /* no need to lock here since we are not calling is_eq - Hubbe */

   k=md->free_list;
   md->free_list=k->next;
   k->next=md->hash[h];
   md->hash[h]=k;
   md->ind_types |= 1 << key->type;
   md->val_types |= 1 << val->type;
   assign_svalue_no_free(& k->ind, key);
   assign_svalue_no_free(& k->val, val);
+  k->hval = h2;
   md->size++;

  #ifdef PIKE_DEBUG
   if(d_flag>1) check_mapping(m);
  #endif
  }

  void mapping_insert(struct mapping *m,
   struct svalue *key,
   struct svalue *val)
pike.git/src/mapping.c:650:
   }
   h=h2 % md->hashsize;

   md->free_list=k->next;
   k->next=md->hash[h];
   md->hash[h]=k;
   assign_svalue_no_free(& k->ind, key);
   k->val.type=T_INT;
   k->val.subtype=NUMBER_NUMBER;
   k->val.u.integer=0;
+  k->hval = h2;
   md->ind_types |= 1 << key->type;
   md->val_types |= BIT_INT;
   md->size++;

  #ifdef PIKE_DEBUG
   if(d_flag > 1) check_mapping_type_fields(m);
  #endif

   return & ( k->val.u );
  }
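
The one-line additions at :546 and :650 cache the full hash value in each keypair (k->hval), which is what lets the lookup macros at :321 and :344 reject most entries with a single integer compare before calling the potentially expensive equality test. A small self-contained sketch of that fast-reject pattern, with hypothetical names standing in for the mapping.c types and for is_eq()/is_identical():

/* Hypothetical illustration only - not the mapping.c API. */
#include <string.h>

struct entry {
  unsigned hval;          /* cached full hash value, like k->hval above */
  const char *key;
  int val;
  struct entry *next;
};

/* Stands in for FUN(& k->ind, KEY), which may be arbitrarily expensive. */
static int slow_equal(const char *a, const char *b)
{
  return strcmp(a, b) == 0;
}

static struct entry *find(struct entry **hash, unsigned hashsize,
                          const char *key, unsigned h2)
{
  struct entry *k;
  for(k = hash[h2 % hashsize]; k; k = k->next)
    if(h2 == k->hval && slow_equal(k->key, key))  /* cheap compare first */
      return k;
  return NULL;
}

int main(void)
{
  struct entry e = { 12345u, "foo", 7, NULL };
  struct entry *hash[16] = { NULL };
  hash[12345u % 16] = &e;
  return find(hash, 16, "foo", 12345u) ? 0 : 1;
}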
pike.git/src/mapping.c:1600: Inside #if defined(PIKE_DEBUG)
  void check_mapping(struct mapping *m)
  {
   int e,num;
   struct keypair *k;
   struct mapping_data *md;
   md=m->data;

   if(m->refs <=0)
     fatal("Mapping has zero refs.\n");

- 
+ 
   if(!m->data)
     fatal("Mapping has no data block.\n");

   if (!m->data->refs)
     fatal("Mapping data block has zero refs.\n");

   if(m->next && m->next->prev != m)
     fatal("Mapping ->next->prev != mapping.\n");

   if(m->prev)
pike.git/src/mapping.c:1665: Inside #if defined(PIKE_DEBUG)
   num++;

   if(! ( (1 << k->ind.type) & (md->ind_types) ))
     fatal("Mapping indices type field lies.\n");

   if(! ( (1 << k->val.type) & (md->val_types) ))
     fatal("Mapping values type field lies.\n");

   check_svalue(& k->ind);
   check_svalue(& k->val);
+ 
+  /* FIXME add check for k->hval
+  * beware that hash_svalue may be threaded and locking
+  * is required!!
+  */
   }

   if(md->size != num)
     fatal("Shields are failing, hull integrity down to 20%%\n");
  }

  void check_all_mappings(void)
  {
   struct mapping *m;
   for(m=first_mapping;m;m=m->next)
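
The FIXME above leaves the k->hval consistency check unwritten because recomputing the hash may require locking. Purely as an illustration of the invariant it asks for (made-up types and hash function, not Pike's internals): every entry's cached hash value should still match what hashing its key produces today.

/* Hypothetical illustration only - not part of this commit. */
#include <assert.h>

struct centry { unsigned hval; int key; struct centry *next; };

/* Stand-in for recomputing the hash of the stored index. */
static unsigned hash_key(int key)
{
  return (unsigned)key * 2654435761u;
}

static void check_chain(struct centry *k)
{
  for(; k; k = k->next)
    assert(k->hval == hash_key(k->key)); /* cached hval must stay in sync */
}

int main(void)
{
  struct centry c = { 0, 7, 0 };
  c.hval = hash_key(c.key);
  check_chain(&c);
  return 0;
}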