Branch: Tag:

2019-02-02

2019-02-02 14:14:27 by Tobias S. Josefowitz <tobij@tobij.de>

Mapping: m1 & m2 is now much faster in cases where m2 >> m1

Previously, when doing m1 & m2, we would always copy m2 and delete all
entries from it not present in m1. Now, if m1 has fewer than half as many
entries as m2, we instead copy m1, delete from the copy the entries not
present in m2, and copy the values from m2 for all remaining entries.

1775:
     if (!a_md->size || !b_md->size) return allocate_mapping(0);

     if (a_md == b_md) return destructive_copy_mapping(a);

+    if (a_md->size >= b_md->size / 2) {
       /* Copy the second mapping. */
       res = copy_mapping(b);
       SET_ONERROR(err, do_free_mapping, res);
1793:
       }
     }
     UNSET_ONERROR(err);
+    } else {
+      /* Copy the first mapping */
+      res = copy_mapping(a);
+      SET_ONERROR(err, do_free_mapping, res);
+
+      /* Remove elements in res that aren't in b, copy values for those that
+       * are. */
+      NEW_MAPPING_LOOP(a_md) {
+        size_t h = k->hval & ( b_md->hashsize - 1);
+        struct keypair *k2;
+        for (k2 = b_md->hash[h]; k2; k2 = k2->next) {
+          if ((k2->hval == k->hval) && is_eq(&k2->ind, &k->ind)) {
+            mapping_insert(res, &k2->ind, &k2->val);
+            break;
+          }
+        }
+        if (!k2) {
+          map_delete(res, &k->ind);
+        }
+      }
+
+      UNSET_ONERROR(err);
+    }
     return res;
   }