Branch: Tag:

2014-08-23

2014-08-23 14:48:40 by Arne Goedeke <el@laramies.com>

block_allocator: fix ba_walk() and improve valgrind support

Blocks in the free list are now marked as free while the callback is
running. This helps detect invalid accesses. Also fixed a bug in
the free list sort.

421:    bv_int_t V = *_v & (~BV_NIL << bit);       bit = c * BV_LENGTH; -  while (!(V)) { -  if (bit >= bv->length) { -  bit = (size_t)-1; -  goto RET; -  } +  +  while (1) { +  if (V) return bit + BV_CTZ(V); +  +  bit += BV_LENGTH; +  +  if (bit >= bv->length) break; +     V = *(++_v); -  bit += (BV_WIDTH*8); +     } -  bit += BV_CTZ(V); -  if (bit >= bv->length) bit = (size_t)-1; +     - RET: -  return bit; +  return (size_t)-1;   }    - #ifdef BA_DEBUG - static void bv_print(struct bitvector * bv) { + #ifdef PIKE_DEBUG + static void __attribute((unused)) bv_print(struct bitvector * bv) {    size_t i;    for (i = 0; i < bv->length; i++) {    fprintf(stderr, "%d", bv_get(bv, i));
446:   }   #endif    - struct ba_block_header * ba_sort_list(const struct ba_page * p, -  struct ba_block_header * b, -  const struct ba_layout * l) { + static void ba_sort_free_list(const struct block_allocator *a, struct ba_page *p, +  const struct ba_layout *l) {    struct bitvector v;    size_t i, j; -  struct ba_block_header ** t = &b; +  struct ba_block_header * b = p->h.first; +  struct ba_block_header ** t = &p->h.first;    -  +  if (!b) return; +  +  j = 0; +     v.length = l->blocks;    i = bv_byte_length(&v);    /* we should probably reuse an area for this.
466:    while (b) {    unsigned INT32 n = ba_block_number(l, p, b);    bv_set(&v, n, 1); +  j++; +  PIKE_MEMPOOL_ALLOC(a, b, l->block_size); +  PIKE_MEM_RW_RANGE(b, sizeof(struct ba_block_header));    if (b->next == BA_ONE) {    v.length = n+1; -  +  PIKE_MEMPOOL_FREE(a, b, l->block_size);    break; -  } else b = b->next; +  } else { +  struct ba_block_header * tmp = b->next; +  PIKE_MEMPOOL_FREE(a, b, l->block_size); +  b = tmp;    } -  +  }    -  +  b = NULL; +     /*    * Handle consecutive free blocks in the end, those    * we dont need anyway.    */    if (v.length) {    i = v.length-1; -  while (i && bv_get(&v, i)) { i--; } +  while (i && bv_get(&v, i)) { i--; j--; }    v.length = i+1;    }    -  +  if (!j) goto last; +     j = 0;       /*
489:    */    while ((i = bv_ctz(&v, j)) != (size_t)-1) {    *t = BA_BLOCKN(*l, p, i); +  if (b) PIKE_MEMPOOL_FREE(a, b, l->block_size); +  b = *t; +  PIKE_MEMPOOL_ALLOC(a, b, l->block_size);    t = &((*t)->next);    j = i+1;    }    -  + last:    /*    * The last one    */       if (v.length < l->blocks) { -  *t = BA_BLOCKN(*l, p, v.length); +  if (b) PIKE_MEMPOOL_FREE(a, b, l->block_size); +  *t = b = BA_BLOCKN(*l, p, v.length); +  PIKE_MEMPOOL_ALLOC(a, b, l->block_size);    (*t)->next = BA_ONE; -  } else *t = NULL; -  -  return b; +  PIKE_MEMPOOL_FREE(a, b, l->block_size); +  } else { +  if (b) PIKE_MEMPOOL_FREE(a, b, l->block_size); +  PIKE_MEMPOOL_ALLOC(a, t, l->block_size); +  *t = NULL; +  PIKE_MEMPOOL_FREE(a, t, l->block_size);    }    - #ifdef USE_VALGRIND - static void ba_list_defined(struct block_allocator * a, struct ba_block_header * b) { -  while (b && b != BA_ONE) { -  PIKE_MEMPOOL_ALLOC(a, b, a->l.block_size); -  PIKE_MEM_RW_RANGE(b, sizeof(struct ba_block_header)); -  b = b->next; +    } - } - #else - #define ba_list_defined(a, b) - #endif +     - #ifdef USE_VALGRIND - static void ba_list_undefined(struct block_allocator * a, struct ba_block_header * b) { -  while (b && b != BA_ONE) { -  struct ba_block_header * next = b->next; -  PIKE_MEMPOOL_FREE(a, b, a->l.block_size); -  b = next; -  } - } - #else - #define ba_list_undefined(a, b) - #endif -  +    /*    * This function allows iteration over all allocated blocks. Some things are not allowed:    * - throwing from within the callback
557:    for (i = 0; i < a->size; i++) {    struct ba_page * p = a->pages[i];    if (p && p->h.used) { -  struct ba_block_header * free_list, * free_block; +  struct ba_block_header *free_block;    -  ba_list_defined(a, p->h.first); -  +     if (!(p->h.flags & BA_FLAG_SORTED)) { -  p->h.first = ba_sort_list(p, p->h.first, &it.l); +  ba_sort_free_list(a, p, &it.l);    p->h.flags |= BA_FLAG_SORTED;    }    /* we fake an allocation to prevent the page from being freed during iteration */    p->h.used ++;    -  free_list = p->h.first; -  free_block = free_list; +  free_block = p->h.first;       it.cur = BA_BLOCKN(it.l, p, 0);   
590:    }       it.end = free_block; -  free_block = free_block->next; +     -  +  PIKE_MEMPOOL_ALLOC(a, free_block, it.l.block_size); +  PIKE_MEM_RW_RANGE(free_block, sizeof(struct ba_block_header)); +  { +  struct ba_block_header *tmp = free_block->next; +  PIKE_MEMPOOL_FREE(a, free_block, it.l.block_size); +  free_block = tmp; +  } +    #ifdef PIKE_DEBUG    if ((char*)it.end < (char*)it.cur)    Pike_fatal("Free list not sorted in ba_walk.\n");   #endif -  if ((char*)it.end != (char*)it.cur) +  if ((char*)it.end != (char*)it.cur) { + #ifdef PIKE_DEBUG +  ba_check_ptr(a, i, it.cur, NULL, __LINE__); +  ba_check_ptr(a, i, (char*)it.end - it.l.block_size, NULL, __LINE__); + #endif    cb(&it, data); -  +  }       it.cur = (char*)it.end + it.l.block_size;    }       /* if the callback throws, this will never happen */ -  ba_list_undefined(a, free_list); +     p->h.used--;    }    ba_double_layout(&it.l);