Branch: Tag:

2000-09-08

2000-09-08 04:40:31 by Fredrik Hübinette (Hubbe) <hubbe@hubbe.net>

mapping addition fixed and optimized

Rev: src/mapping.c:1.101

@@ line 5 @@
    */
   /**/
   #include "global.h"
-  RCSID("$Id: mapping.c,v 1.100 2000/09/07 11:31:05 grubba Exp $");
+  RCSID("$Id: mapping.c,v 1.101 2000/09/08 04:40:31 hubbe Exp $");
   #include "main.h"
   #include "object.h"
   #include "mapping.h"
@@ line 312 @@
   init_mapping(m, new_size);
   debug_malloc_touch(m);
   new_md=m->data;
+  new_md->flags = md->flags;

   /* This operation is now 100% atomic - no locking required */
   if(md->refs>1)
@@ line 1444 @@
   e+=argp[d].u.mapping->data->size;
   }

-#if 1
-  /* Major optimization */
-  for(d=0;d<args;d++)
-    if(e==argp[d].u.mapping->data->size)
-      return copy_mapping(argp[d].u.mapping);
+  if(!e) return allocate_mapping(0);
+
+  d=0;
+
+  for(;d<args;d++)
+  {
+    struct mapping *m=argp[d].u.mapping;
+    struct mapping_data *md=m->data;
+
+    if(md->size == 0) continue;
+
+#if 1 /* major optimization */
+    if(e==md->size && !(md->flags && MAPPING_FLAG_WEAK))
+      return copy_mapping(m);
 #endif

-  /* FIXME: need locking! */
-  if(argp[0].u.mapping->refs == 1 &&
-     !argp[0].u.mapping->data->hardlinks)
+    if(m->refs == 1 && !md->hardlinks)
     {
-      add_ref( ret=argp[0].u.mapping );
-      d=1;
+      add_ref( ret=m );
+      d++;
     }else{
       ret=allocate_mapping(MAP_SLOTS(e));
-      d=0;
+
     }
-
+
     for(;d<args;d++)
-      MAPPING_LOOP(argp[d].u.mapping)
+    {
+      m=argp[d].u.mapping;
+      md=m->data;
+
+      add_ref(md);
+      NEW_MAPPING_LOOP(md)
         mapping_insert(ret, &k->ind, &k->val);
-
+      free_mapping_data(md);
+    }
     return ret;
   }
-
+  fatal("add_mappings is confused!\n");
+}

 PMOD_EXPORT int mapping_equal_p(struct mapping *a, struct mapping *b, struct processing *p)
 {

[NOTE(review): the added condition `!(md->flags && MAPPING_FLAG_WEAK)` uses logical
`&&`, which tests "any flag set", not "the weak flag set". This looks like a typo
for the bitwise test `!(md->flags & MAPPING_FLAG_WEAK)` — confirm against the
MAPPING_FLAG_WEAK usage elsewhere in mapping.c / a follow-up revision.]
@@ line 1691 @@
   md=m->data;
   md->valrefs++;
   add_ref(md);
-  MAPPING_LOOP(m) /* FIXME: Shouldn't NEW_MAPPING_LOOP() be used? */
+  NEW_MAPPING_LOOP(md)
   {
     copy_svalues_recursively_no_free(sp,&k->ind, 1, p);
     sp++;