

2014-05-23 15:58:24 by Per Hedbor <ph@opera.com>

Removed dead feature PIKE_RUN_UNLOCKED

This feature has not worked since at least y2k
(5b15bb756813728027b8f9d6077ac0a3a1faaf0b)

In fact, it never did work very well even before that, and has
always been much slower than not running unlocked, especially on
multi-CPU systems.
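
To make clear what is being deleted: DO_IF_RUN_UNLOCKED() wrapped a
per-hash-table mutex around every operation in the macros below, where
mt_lock()/mt_unlock() are Pike's mutex wrappers. A minimal sketch,
assuming the usual conditional-expansion definition (the real one
lived in Pike's thread headers):

#include <stdio.h>

/* Assumed definition; a sketch only, not the actual Pike header. */
#ifdef PIKE_RUN_UNLOCKED
#define DO_IF_RUN_UNLOCKED(X) X
#else
#define DO_IF_RUN_UNLOCKED(X)
#endif

int main(void)
{
  /* In the default build the whole argument vanishes, leaving only
   * an empty statement, so the call sites in the diff cost nothing. */
  DO_IF_RUN_UNLOCKED(puts("locked build: taking the hash-table mutex"));
  puts("default build: no locking code emitted");
  return 0;
}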

15:
  #undef PTR_HASH_ALLOC_FILL_PAGES
  #undef PTR_HASH_ALLOC_FIXED_FILL_PAGES

- /* Define this to keep freed blocks around in a backlog, which can
-  * help locating leftover pointers to other blocks. It can also hide
-  * bugs since the blocks remain intact a while after they are freed.
-  * Valgrind will immediately detect attempts to use blocks on the
-  * backlog list, though. Only available with dmalloc debug. */
- /* #define DMALLOC_BLOCK_BACKLOG */
-
+
  #define INIT_BLOCK(X)
  #define EXIT_BLOCK(X)
  #define BLOCK_ALLOC_HSIZE_SHIFT 2
-
- #if defined (DMALLOC_BLOCK_BACKLOG) && defined (DEBUG_MALLOC)
- #define DO_IF_BLOCK_BACKLOG(X) X
- #define DO_IF_NOT_BLOCK_BACKLOG(X)
- #define BLOCK_ALLOC_USED real_used
- #else
- #define DO_IF_BLOCK_BACKLOG(X)
- #define DO_IF_NOT_BLOCK_BACKLOG(X) X
- #define BLOCK_ALLOC_USED used
- #endif
-
+
  #ifndef PIKE_HASH_T
  #define PIKE_HASH_T size_t
  #endif /* !PIKE_HASH_T */

- #ifdef PIKE_RUN_UNLOCKED
- #include "threads.h"
-
- /* Block Alloc UnLocked */
- #define BA_UL(X) PIKE_CONCAT(X,_unlocked)
- #define BA_STATIC static
- #define BA_INLINE INLINE
- #else
- #define BA_UL(X) X
+
  #define BA_STATIC
  #define BA_INLINE
- #endif
+

  #define PTR_HASH_ALLOC_FILL_PAGES(DATA, PAGES) \

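The BA_UL() indirection removed here existed only to pick the
_unlocked name variant at preprocessing time, which is why the later
hunks simply drop BA_UL() at each call site. A hedged sketch of the
token pasting involved (CONCAT stands in for Pike's PIKE_CONCAT):

#include <stdio.h>

/* Two-step pasting so macro arguments are expanded before ##. */
#define CONCAT_(a, b) a##b
#define CONCAT(a, b)  CONCAT_(a, b)

/* Pre-commit behaviour: route callers to the _unlocked variant only
 * when PIKE_RUN_UNLOCKED is defined; identity mapping otherwise. */
#ifdef PIKE_RUN_UNLOCKED
#define BA_UL(X) CONCAT(X, _unlocked)
#else
#define BA_UL(X) X
#endif

static void alloc_foo(void) { puts("alloc_foo"); }

int main(void)
{
  BA_UL(alloc_foo)();   /* default build: expands to alloc_foo() */
  return 0;
}
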
123:
  size_t PIKE_CONCAT(DATA,_hash_table_size)=0; \
  static size_t PIKE_CONCAT(num_,DATA)=0; \
  \
- static INLINE struct DATA * \
+ static struct DATA * \
   PIKE_CONCAT3(really_low_find_,DATA,_unlocked)(void *ptr, \
                                                 PIKE_HASH_T hval) \
  { \

147:
  { \
    struct DATA *p; \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    if(!PIKE_CONCAT(DATA,_hash_table)) { \
-     DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
      return 0; \
    } \
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    p=PIKE_CONCAT3(really_low_find_,DATA,_unlocked)(ptr, hval); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return p; \
  } \
  \

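With the locking gone, find_DATA() is just: hash the pointer, mask it
down to a bucket, walk the collision chain. A stand-alone sketch under
assumed names (struct entry stands in for the generated struct DATA,
next/data for the BLOCK_ALLOC_NEXT/PTR_HASH_ALLOC_DATA fields):

#include <stddef.h>

struct entry {
  struct entry *next;   /* BLOCK_ALLOC_NEXT */
  void *data;           /* PTR_HASH_ALLOC_DATA */
};

/* Sketch of the lookup path; table_size must be a power of two for
 * the mask to select a valid bucket. */
static struct entry *find_entry(struct entry **table, size_t table_size,
                                size_t hash, void *ptr)
{
  struct entry *p;
  if (!table)                       /* table not initialized yet */
    return NULL;
  for (p = table[hash & (table_size - 1)]; p; p = p->next)
    if (p->data == ptr)
      return p;
  return NULL;
}
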
179:
  { \
    struct DATA *p; \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    if(!PIKE_CONCAT(DATA,_hash_table)) { \
-     DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
      return 0; \
    } \
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    p=PIKE_CONCAT3(just_find_,DATA,_unlocked)(ptr, hval); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return p; \
  } \
  \

196:
  { \
    struct DATA *p; \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    p=PIKE_CONCAT3(make_,DATA,_unlocked)(ptr,hval); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return p; \
  } \
  \

207:
  { \
    struct DATA *p; \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    if(!(p=PIKE_CONCAT3(really_low_find_,DATA,_unlocked)(ptr, hval))) \
      p=PIKE_CONCAT3(make_,DATA,_unlocked)(ptr, hval); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return p; \
  } \
  \
  int PIKE_CONCAT3(check_,DATA,_semaphore)(void *ptr) \
  { \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    if(PIKE_CONCAT3(really_low_find_,DATA,_unlocked)(ptr, hval)) \
    { \
-     DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
      return 0; \
    } \
    \
    PIKE_CONCAT3(make_,DATA,_unlocked)(ptr, hval); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return 1; \
  } \
  \

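get_DATA() and check_DATA_semaphore() above are the same find-or-insert
walk; get_ returns the entry either way, while the _semaphore variant
answers "was it already there?" (0 if present, 1 after inserting). A
sketch of the get_ shape, reusing the struct entry stand-in and a
hypothetical alloc_entry() in place of the block allocator's
alloc_DATA():

#include <stddef.h>
#include <stdlib.h>

struct entry { struct entry *next; void *data; };

/* Hypothetical allocator stand-in for alloc_DATA(). */
static struct entry *alloc_entry(void)
{
  return calloc(1, sizeof(struct entry));
}

static struct entry *get_entry(struct entry **table, size_t table_size,
                               size_t hash, void *ptr)
{
  size_t hval = hash & (table_size - 1);
  struct entry *p;
  for (p = table[hval]; p; p = p->next)
    if (p->data == ptr)
      return p;                /* found: nothing to insert */
  p = alloc_entry();           /* miss: make_DATA_unlocked() */
  if (!p)
    return NULL;
  p->data = ptr;
  p->next = table[hval];       /* push-front into the bucket */
  table[hval] = p;
  return p;
}
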
235:
  { \
    struct DATA **pp, *p; \
    PIKE_HASH_T hval = ptr_hashfun(block->PTR_HASH_ALLOC_DATA); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    pp=PIKE_CONCAT(DATA,_hash_table) + hval; \
    while((p = *pp)) \

253:
      ((PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1); \
    block->BLOCK_ALLOC_NEXT = PIKE_CONCAT(DATA,_hash_table)[hval]; \
    PIKE_CONCAT(DATA,_hash_table)[hval] = block; \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
  } \
  \
  int PIKE_CONCAT(remove_,DATA)(void *ptr) \
  { \
    struct DATA **pp, *p; \
    PIKE_HASH_T hval = ptr_hashfun(ptr); \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    if(!PIKE_CONCAT(DATA,_hash_table)) \
    { \
-     DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
      return 0; \
    } \
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \

274:
    { \
      *pp = p->BLOCK_ALLOC_NEXT; \
      PIKE_CONCAT(num_,DATA)--; \
-     BA_UL(PIKE_CONCAT(really_free_,DATA))(p); \
-     DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+     PIKE_CONCAT(really_free_,DATA)(p); \
      return 1; \
    } \
    pp = &p->BLOCK_ALLOC_NEXT; \
    } \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
    return 0; \
  } \
  \
- void PIKE_CONCAT3(low_init_,DATA,_hash)(size_t size) \
+ static void PIKE_CONCAT3(low_init_,DATA,_hash)(size_t size) \
  { \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    PIKE_CONCAT(DATA,_hash_table_size)= \
      ptr_hash_find_hashsize(size); \
    \

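remove_DATA() walks the bucket with a pointer to the link field itself
(struct DATA **pp), the classic unlink idiom that needs no special case
for the head of the chain. The same shape as a sketch:

#include <stddef.h>
#include <stdlib.h>

struct entry { struct entry *next; void *data; };

/* pp always addresses the slot holding the current entry, either the
 * bucket itself or the previous entry's next field, so removal is a
 * single assignment through it. */
static int remove_entry(struct entry **table, size_t table_size,
                        size_t hash, void *ptr)
{
  struct entry **pp = &table[hash & (table_size - 1)];
  struct entry *p;
  while ((p = *pp)) {
    if (p->data == ptr) {
      *pp = p->next;
      free(p);                 /* really_free_DATA() in the macros */
      return 1;
    }
    pp = &p->next;
  }
  return 0;
}
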
299:
    } \
    MEMSET(PIKE_CONCAT(DATA,_hash_table),0, \
           sizeof(struct DATA *)*PIKE_CONCAT(DATA,_hash_table_size)); \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
  } \
  \
  \
- void PIKE_CONCAT3(init_,DATA,_hash)(void) \
+ static void PIKE_CONCAT3(init_,DATA,_hash)(void) \
  { \
    PIKE_CONCAT3(low_init_,DATA,_hash)(BSIZE); \
  } \
  \
- void PIKE_CONCAT3(exit_,DATA,_hash)(void) \
+ static void PIKE_CONCAT3(exit_,DATA,_hash)(void) \
  { \
-   DO_IF_RUN_UNLOCKED(mt_lock(&PIKE_CONCAT(DATA,_mutex))); \
+
    ba_free_all(& PIKE_CONCAT(DATA,_allocator)); \
    free(PIKE_CONCAT(DATA,_hash_table)); \
    PIKE_CONCAT(DATA,_hash_table)=0; \
    PIKE_CONCAT(num_,DATA)=0; \
-   DO_IF_RUN_UNLOCKED(mt_unlock(&PIKE_CONCAT(DATA,_mutex))); \
+
  }

  #define PTR_HASH_ALLOC_FIXED(DATA,BSIZE) \
- struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, PIKE_HASH_T hval);\
+ static struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, PIKE_HASH_T hval);\
  LOW_PTR_HASH_ALLOC(DATA,BSIZE) \
  \
- struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, PIKE_HASH_T hval) \
+ static struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, PIKE_HASH_T hval) \
  { \
    struct DATA *p; \
    \

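Every hunk in this file reduces hashes with hval &= table_size - 1
rather than a modulo, which is only correct when the table size is a
power of two; ptr_hash_find_hashsize(), called from low_init_DATA_hash()
above, presumably guarantees that. A sketch of such a helper (assumed
behaviour, not the actual implementation):

#include <stddef.h>

/* Round up to the next power of two, e.g. 100 -> 128, so that
 * (hval & (size - 1)) is always a valid bucket index. */
static size_t next_pow2(size_t n)
{
  size_t s = 1;
  while (s < n)
    s <<= 1;
  return s;
}
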
330:
      Pike_fatal("Hash table error!\n"); ) \
    PIKE_CONCAT(num_,DATA)++; \
    \
-   p=BA_UL(PIKE_CONCAT(alloc_,DATA))(); \
+   p=PIKE_CONCAT(alloc_,DATA)(); \
    p->PTR_HASH_ALLOC_DATA=ptr; \
    p->BLOCK_ALLOC_NEXT=PIKE_CONCAT(DATA,_hash_table)[hval]; \
    PIKE_CONCAT(DATA,_hash_table)[hval]=p; \

339:

  #define PTR_HASH_ALLOC(DATA,BSIZE) \
- struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, \
+ static struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, \
                                                  PIKE_HASH_T hval); \
  LOW_PTR_HASH_ALLOC(DATA,BSIZE) \
  \

379:
    } \
  } \
  \
- struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, \
+ static struct DATA *PIKE_CONCAT3(make_,DATA,_unlocked)(void *ptr, \
                                                 PIKE_HASH_T hval) \
  { \
    struct DATA *p; \

396:
    hval &= (PIKE_HASH_T)PIKE_CONCAT(DATA,_hash_table_size) - 1; \
    } \
    \
-   p=BA_UL(PIKE_CONCAT(alloc_,DATA))(); \
+   p=PIKE_CONCAT(alloc_,DATA)(); \
    p->PTR_HASH_ALLOC_DATA=ptr; \
    p->BLOCK_ALLOC_NEXT=PIKE_CONCAT(DATA,_hash_table)[hval]; \
    PIKE_CONCAT(DATA,_hash_table)[hval]=p; \
    return p; \
  }
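
Unlike the _FIXED variant, this PTR_HASH_ALLOC version of
make_DATA_unlocked() can grow the hash table first, re-masking hval
against the new size before the push-front insert (the growth test
itself is elided in the hunk above). A hedged sketch of that rehash
step, with grow_table() as a hypothetical helper for logic the macros
inline:

#include <stddef.h>
#include <stdlib.h>

struct entry { struct entry *next; void *data; };

/* Double the table and relink every entry into its new bucket; the
 * caller then re-masks any pending hash value against the new size. */
static struct entry **grow_table(struct entry **table, size_t *table_size,
                                 size_t (*hashfun)(void *))
{
  size_t old_size = *table_size, new_size = old_size * 2, i;
  struct entry **nt = calloc(new_size, sizeof *nt);
  if (!nt)
    return table;              /* on failure, keep the old table */
  for (i = 0; i < old_size; i++) {
    struct entry *p, *next;
    for (p = table[i]; p; p = next) {
      size_t h = hashfun(p->data) & (new_size - 1);
      next = p->next;
      p->next = nt[h];
      nt[h] = p;
    }
  }
  free(table);
  *table_size = new_size;
  return nt;
}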