|
|
|
|
|
|
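// RAM cache implementation for the Roxen server: cache managers with
// their eviction policies (GreedyDual-Size variants), size accounting
// and rebalancing between managers, the periodic cache garbage
// collector, the non-garbing cache, and the session cache.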
#include <roxen.h> |
#include <config.h> |
|
#ifdef MORE_CACHE_DEBUG |
# define MORE_CACHE_WERR(X...) report_debug("CACHE: "+X) |
# undef CACHE_DEBUG |
# define CACHE_DEBUG |
#else |
# define MORE_CACHE_WERR(X...) 0 |
#endif |
|
#ifdef CACHE_DEBUG |
# define CACHE_WERR(X...) report_debug("CACHE: "+X) |
#else |
# define CACHE_WERR(X...) 0 |
#endif |
|
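// Total size limit in bytes for the whole RAM cache, shared by all
// cache managers. Adjust it through set_total_size_limit().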
int total_size_limit = 1024 * 1024; |
|
|
|
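// Time span in seconds over which the decaying cache statistics are
// averaged, and hence over which the size rebalancing between the
// cache managers adapts.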
constant rebalance_adaptivity = 30 * 60; |
|
|
|
|
|
|
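// Seconds between the periodic cache size rebalance runs
// (see update_cache_size_balance()).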
constant rebalance_interval = 10; |
|
|
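// Fraction of its currently used size that each cache manager gets
// reserved in a rebalance run, derived from rebalance_adaptivity and
// rebalance_interval above.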
constant rebalance_keep_factor = |
1.0 - 1.0 / (rebalance_adaptivity / rebalance_interval); |
|
|
|
|
|
|
|
|
|
|
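// Lower bound in bytes on the size limit that the rebalancer assigns
// to a cache manager.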
constant rebalance_min_size = 1024 * 1024; |
|
|
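// Approximate averaging period in seconds for the decaying statistics
// kept by the cache managers (see CacheManager.update_decaying_stats).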
protected constant cm_stats_avg_period = rebalance_adaptivity; |
|
|
|
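// Sets the total RAM cache size limit in bytes and immediately
// rebalances the space between the cache managers.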
void set_total_size_limit (int size) |
|
{ |
total_size_limit = size; |
|
|
|
|
|
|
|
update_cache_size_balance(); |
} |
|
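// Base class for cache entries, holding the key, the data and the
// bookkeeping fields used by the cache managers. Managers may subclass
// it to add policy-specific fields (see e.g. CM_GreedyDual.CacheEntry).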
class CacheEntry (mixed key, mixed data) |
|
{ |
|
|
|
int size; |
|
|
#ifdef DEBUG_CACHE_SIZES |
int cmp_size; |
|
|
|
|
|
#endif |
|
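  // Absolute expiry time (unix time); zero means the entry never
  // times out.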
int timeout; |
|
|
|
|
|
|
|
|
protected string format_key() |
{ |
if (stringp (key)) { |
if (sizeof (key) > 40) |
return sprintf ("%q...", key[..39 - sizeof ("...")]); |
else |
return sprintf ("%q", key); |
} |
else if (intp (key) || floatp (key) || objectp (key)) |
return sprintf ("%O", key); |
else |
return sprintf ("%t", key); |
} |
|
protected string _sprintf (int flag) |
{ |
return flag == 'O' && sprintf ("CacheEntry(%s, %db)", format_key(), size); |
} |
} |
|
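// Statistics for a single named cache. Each cache manager keeps one
// CacheStats instance per registered cache in its stats mapping.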
class CacheStats |
|
{ |
int count; |
|
|
int size; |
|
|
int hits, misses; |
|
|
|
int|float cost_hits, cost_misses; |
|
|
|
|
|
#ifdef CACHE_BYTE_HR_STATS |
int byte_hits, byte_misses; |
|
|
|
|
#endif |
|
protected string _sprintf (int flag) |
{ |
return flag == 'O' && sprintf ("CacheStats(%d, %dk)", count, size / 1024); |
} |
} |
|
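// Base class for the cache managers. A manager owns the entries of the
// caches registered with it, tracks their combined size against its
// size limit, accounts hits and misses, and implements the eviction
// policy through got_hit/got_miss/add_entry/remove_entry/evict.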
class CacheManager |
|
|
|
|
{ |
|
|
|
|
|
|
|
|
|
constant has_cost = 0; |
|
|
int total_size_limit = global::total_size_limit; |
|
|
int size; |
|
|
int size_limit = global::total_size_limit; |
|
|
|
|
int entry_add_count; |
|
|
mapping(string:mapping(mixed:CacheEntry)) lookup = ([]); |
|
|
|
|
|
mapping(string:CacheStats) stats = ([]); |
|
|
|
|
|
|
|
|
|
void clear_cache_context() {} |
|
|
|
|
void got_miss (string cache_name, mixed key, mapping cache_context); |
|
|
protected void account_miss (string cache_name) |
{ |
recent_misses++; |
if (CacheStats cs = stats[cache_name]) |
cs->misses++; |
} |
|
void got_hit (string cache_name, CacheEntry entry, mapping cache_context); |
|
|
protected void account_hit (string cache_name, CacheEntry entry) |
{ |
recent_hits++; |
recent_cost_hits += entry->cost; |
if (CacheStats cs = stats[cache_name]) { |
cs->hits++; |
cs->cost_hits += entry->cost; |
#ifdef CACHE_BYTE_HR_STATS |
cs->byte_hits += entry->size; |
#endif |
} |
} |
|
int add_entry (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context); |
|
|
|
|
|
|
|
|
private void account_remove_entry (string cache_name, CacheEntry entry) |
{ |
if (CacheStats cs = stats[cache_name]) { |
cs->count--; |
cs->size -= entry->size; |
ASSERT_IF_DEBUG (cs->size >= 0, cs->size); |
ASSERT_IF_DEBUG (cs->count >= 0, cs->count); |
} |
size -= entry->size; |
ASSERT_IF_DEBUG (size >= 0, size); |
} |
|
protected int low_add_entry (string cache_name, CacheEntry entry) |
{ |
    ASSERT_IF_DEBUG (entry->size, entry->size);
|
|
|
recent_cost_misses += entry->cost; |
|
if (CacheStats cs = stats[cache_name]) { |
cs->cost_misses += entry->cost; |
#ifdef CACHE_BYTE_HR_STATS |
cs->byte_misses += entry->size; |
#endif |
|
if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) { |
|
CacheEntry old_entry = lm[entry->key]; |
lm[entry->key] = entry; |
|
|
if (old_entry) { |
account_remove_entry (cache_name, old_entry); |
recent_added_bytes -= entry->size; |
remove_entry (cache_name, old_entry); |
} |
|
cs->count++; |
cs->size += entry->size; |
size += entry->size; |
recent_added_bytes += entry->size; |
|
if (!(++entry_add_count & 0x3fff)) |
update_size_limit(); |
} |
|
return 1; |
} |
|
return 0; |
} |
|
int remove_entry (string cache_name, CacheEntry entry); |
|
|
|
|
|
|
protected int low_remove_entry (string cache_name, CacheEntry entry) |
|
|
{ |
if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) |
if (lm[entry->key] == entry) { |
|
m_delete (lm, entry->key); |
account_remove_entry (cache_name, entry); |
return 1; |
} |
return 0; |
} |
|
void evict (int max_size); |
|
|
void after_gc() {} |
|
|
|
int manager_size_overhead() |
|
|
{ |
int res = (Pike.count_memory (-1, this, lookup) + |
Pike.count_memory (0, stats)); |
foreach (lookup;; mapping(mixed:CacheEntry) lm) |
res += Pike.count_memory (-1, lm); |
return res; |
} |
|
float add_rate = 0.0; |
|
|
|
|
|
float hits = 0.0, misses = 0.0; |
|
|
|
float cost_hits = 0.0, cost_misses = 0.0; |
|
|
|
|
protected int recent_added_bytes; |
protected int recent_hits, recent_misses; |
protected int|float recent_cost_hits, recent_cost_misses; |
|
void update_decaying_stats (int start_time, int last_update, int now) |
|
|
{ |
|
if (now == last_update) |
return; |
|
float last_period = (float) (now - last_update); |
float tot_period = (float) (now - start_time); |
int startup = tot_period < cm_stats_avg_period; |
if (!startup) tot_period = (float) cm_stats_avg_period; |
|
float our_weight = min (last_period / tot_period, 1.0); |
float old_weight = 1.0 - our_weight; |
|
if (startup) { |
hits += (float) recent_hits; |
misses += (float) recent_misses; |
cost_hits += (float) recent_cost_hits; |
cost_misses += (float) recent_cost_misses; |
} |
|
else { |
hits = old_weight * hits + (float) recent_hits; |
misses = old_weight * misses + (float) recent_misses; |
cost_hits = old_weight * cost_hits + (float) recent_cost_hits; |
cost_misses = old_weight * cost_misses + (float) recent_cost_misses; |
} |
|
add_rate = (old_weight * add_rate + |
our_weight * (recent_added_bytes / last_period)); |
|
recent_added_bytes = 0; |
recent_hits = recent_misses = 0; |
recent_cost_hits = recent_cost_misses = 0; |
} |
|
void update_size_limit() |
{ |
MORE_CACHE_WERR ("%O: update_size_limit\n", this); |
int mgr_oh = manager_size_overhead(); |
size_limit = max (0, total_size_limit - mgr_oh); |
if (size > size_limit) { |
CACHE_WERR ("%O: Evicting %db " |
"(entry size limit %db, manager overhead %db, total %db)\n", |
this, size - size_limit, |
size_limit, mgr_oh, total_size_limit); |
evict (size_limit); |
} |
} |
|
int free_space() |
|
|
|
{ |
return size_limit - size; |
} |
|
string format_cost (int|float cost) {return "-";} |
|
|
|
protected string _sprintf (int flag) |
{ |
return flag == 'O' && |
sprintf ("CacheManager(%s: %dk/%dk)", |
this->name || "-", size / 1024, size_limit / 1024); |
} |
} |
|
#if 0 |
class CM_Random |
{ |
inherit CacheManager; |
|
constant name = "Random"; |
constant doc = #"\ |
This is a very simple cache manager that just evicts entries from the |
cache at random. Its only upside is that the cache management
overhead is minimal.";
|
|
|
constant CacheEntry = global::CacheEntry; |
|
void got_miss (string cache_name, mixed key, mapping cache_context) |
{ |
account_miss (cache_name); |
} |
|
void got_hit (string cache_name, CacheEntry entry, mapping cache_context) |
{ |
account_hit (cache_name, entry); |
} |
|
int add_entry (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context) |
{ |
int res = low_add_entry (cache_name, entry); |
if (size > size_limit) evict (size_limit); |
return res; |
} |
|
int remove_entry (string cache_name, CacheEntry entry) |
{ |
return low_remove_entry (cache_name, entry); |
} |
|
void evict (int max_size) |
{ |
while (size > max_size) { |
if (!sizeof (lookup)) break; |
|
string cache_name = random (lookup)[0]; |
|
if (mapping(mixed:CacheEntry) lm = lookup[cache_name]) { |
if (sizeof (lm)) { |
|
CacheEntry entry = random (lm)[1]; |
MORE_CACHE_WERR ("evict: Size %db > %db - evicting %O / %O.\n", |
size, max_size, cache_name, entry); |
low_remove_entry (cache_name, entry); |
} |
else |
m_delete (lookup, cache_name); |
} |
} |
} |
} |
|
protected CM_Random cm_random = CM_Random(); |
#endif |
|
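// Base class for cache managers implementing the GreedyDual family of
// policies: every entry gets a value when it is added, its priority
// (pval) is that value offset by the lowest priority in the cache at
// the time it is added or hit, and eviction always removes the entry
// with the lowest priority.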
class CM_GreedyDual |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{ |
inherit CacheManager; |
|
class CacheEntry |
{ |
inherit global::CacheEntry; |
|
int|float value; |
|
|
int|float pval; |
|
|
|
|
string cache_name; |
|
|
|
protected int `< (CacheEntry other) |
{ |
return pval < other->pval; |
} |
|
protected string _sprintf (int flag) |
{ |
return flag == 'O' && |
sprintf ("CM_GreedyDual.CacheEntry(%O: %s, %db, %O)", |
pval, format_key(), size, value); |
} |
} |
|
multiset(CacheEntry) priority_list = (<>); |
|
|
|
protected int max_used_pval; |
|
|
|
|
|
|
|
|
|
|
|
|
#ifdef CACHE_DEBUG |
protected void debug_check_priority_list() |
|
{ |
werror ("Checking priority_list with %d entries.\n", |
sizeof (priority_list)); |
CacheEntry prev; |
foreach (priority_list; CacheEntry entry;) { |
if (!prev) |
prev = entry; |
else if (prev >= entry) |
error ("Found cache entries in wrong order: %O vs %O in %O\n", |
prev, entry, priority_list); |
if (intp (entry->pval) && entry->pval > max_used_pval) |
error ("Found %O with pval higher than max %O.\n", |
entry, max_used_pval); |
} |
} |
#endif |
|
int|float calc_value (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context); |
|
|
|
|
void got_miss (string cache_name, mixed key, mapping cache_context) |
{ |
account_miss (cache_name); |
} |
|
local protected int|float calc_pval (CacheEntry entry) |
{ |
int|float pval; |
if (CacheEntry lowest = get_iterator (priority_list)->index()) { |
int|float l = lowest->pval, v = entry->value; |
pval = l + v; |
|
if (floatp (v)) { |
if (v != 0.0 && v < l * (Float.EPSILON * 0x10)) { |
#ifdef DEBUG |
if (max_used_pval != Int.NATIVE_MAX) |
werror ("%O: Ran out of significant digits for cache entry %O - " |
"got min priority %O and entry value %O.\n", |
this, entry, l, v); |
#endif |
|
max_used_pval = Int.NATIVE_MAX; |
} |
} |
else if (pval > max_used_pval) |
max_used_pval = pval; |
} |
else |
|
pval = entry->value; |
return pval; |
} |
|
void got_hit (string cache_name, CacheEntry entry, mapping cache_context) |
{ |
account_hit (cache_name, entry); |
int|float pval = calc_pval (entry); |
|
|
|
|
|
|
|
|
priority_list[entry] = 0; |
entry->pval = pval; |
priority_list[entry] = 1; |
} |
|
int add_entry (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context) |
{ |
entry->cache_name = cache_name; |
int|float v = entry->value = |
calc_value (cache_name, entry, old_entry, cache_context); |
|
if (!low_add_entry (cache_name, entry)) return 0; |
|
entry->pval = calc_pval (entry); |
priority_list[entry] = 1; |
|
if (size > size_limit) evict (size_limit); |
return 1; |
} |
|
int remove_entry (string cache_name, CacheEntry entry) |
{ |
priority_list[entry] = 0; |
return low_remove_entry (cache_name, entry); |
} |
|
void evict (int max_size) |
{ |
object threads_disabled; |
|
while (size > max_size) { |
CacheEntry entry = get_iterator (priority_list)->index(); |
if (!entry) break; |
|
if (!priority_list[entry]) { |
|
|
|
if (!threads_disabled) |
|
threads_disabled = _disable_threads(); |
else { |
|
|
#ifdef CACHE_DEBUG |
debug_check_priority_list(); |
#endif |
report_warning ("Warning: Recovering from race inconsistency " |
"in %O->priority_list (on %O).\n", this, entry); |
priority_list = (<>); |
foreach (lookup; string cache_name; mapping(mixed:CacheEntry) lm) |
foreach (lm;; CacheEntry entry) |
priority_list[entry] = 1; |
} |
continue; |
} |
|
priority_list[entry] = 0; |
|
MORE_CACHE_WERR ("evict: Size %db > %db - evicting %O / %O.\n", |
size, max_size, entry->cache_name, entry); |
|
low_remove_entry (entry->cache_name, entry); |
} |
|
threads_disabled = 0; |
} |
|
int manager_size_overhead() |
{ |
return Pike.count_memory (-1, priority_list) + ::manager_size_overhead(); |
} |
|
void after_gc() |
{ |
if (max_used_pval > Int.NATIVE_MAX / 2) { |
int|float pval_base; |
if (CacheEntry lowest = get_iterator (priority_list)->index()) |
pval_base = lowest->pval; |
else |
return; |
|
|
|
|
|
object threads_disabled = _disable_threads(); |
mapping(string:mapping(mixed:CacheEntry)) old_lookup = lookup; |
lookup = ([]); |
foreach (old_lookup; string cache_name;) { |
lookup[cache_name] = ([]); |
if (CacheStats cs = stats[cache_name]) { |
cs->count = 0; |
cs->size = 0; |
} |
} |
priority_list = (<>); |
size = 0; |
max_used_pval = 0; |
threads_disabled = 0; |
|
int|float max_pval; |
|
foreach (old_lookup; string cache_name; mapping(mixed:CacheEntry) old_lm) |
if (CacheStats cs = stats[cache_name]) |
if (mapping(mixed:CacheEntry) new_lm = lookup[cache_name]) |
foreach (old_lm; mixed key; CacheEntry entry) { |
int|float pval = entry->pval -= pval_base; |
if (!new_lm[key]) { |
|
new_lm[key] = entry; |
|
priority_list[entry] = 1; |
if (pval > max_pval) max_pval = pval; |
|
cs->count++; |
cs->size += entry->size; |
size += entry->size; |
} |
} |
|
#ifdef CACHE_DEBUG |
debug_check_priority_list(); |
#endif |
|
if (intp (max_pval)) |
max_used_pval = max_pval; |
|
CACHE_WERR ("%O: Rebased priority values - " |
"old base was %O, new range is 0..%O.\n", |
this, pval_base, max_pval); |
|
if (Configuration admin_config = roxenp()->get_admin_configuration()) |
|
admin_config->log_event ("roxen", "ram-cache-rebase", this->name, ([ |
"old-pval-base": pval_base, |
"new-max-pval": max_pval, |
])); |
} |
} |
} |
|
class CM_GDS_1 |
{ |
inherit CM_GreedyDual; |
|
constant name = "GDS(1)"; |
constant doc = #"\ |
This cache manager implements <a |
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> |
with the cost of each entry fixed at 1, which makes it optimize the |
cache hit ratio."; |
|
float calc_value (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context) |
{ |
ASSERT_IF_DEBUG (entry->size > 10, entry->size); |
return 1.0 / entry->size; |
} |
} |
|
protected CM_GDS_1 cm_gds_1 = CM_GDS_1(); |
|
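// Base class for the GreedyDual-Size managers that use a time
// measurement as entry cost: the creation time is measured from the
// preceding got_miss() (or got_hit()) call for the same key to the
// add_entry() call, and the entry value is that cost divided by the
// entry size. Entries without a measurement fall back to the running
// mean cost of their cache.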
class CM_GDS_Time |
|
|
{ |
inherit CM_GreedyDual; |
|
constant has_cost = 1; |
|
class CacheEntry |
{ |
inherit CM_GreedyDual::CacheEntry; |
int|float cost; |
|
protected string _sprintf (int flag) |
{ |
return flag == 'O' && |
sprintf ("CM_GDS_Time.CacheEntry(%O: %s, %db, %O, %O)", |
pval, format_key(), size, value, cost); |
} |
} |
|
protected Thread.Local cache_contexts = Thread.Local(); |
|
|
|
|
|
|
|
void clear_cache_context() |
{ |
cache_contexts->set (([])); |
} |
|
protected int gettime_func(); |
|
|
|
protected void save_start_hrtime (string cache_name, mixed key, |
mapping cache_context) |
{ |
if (mapping all_ctx = cache_context || cache_contexts->get()) { |
int start = gettime_func() - all_ctx[0]; |
|
if (mapping(mixed:int) ctx = all_ctx[cache_name]) { |
#if 0 |
if (!zero_type (ctx[key])) |
|
|
|
|
|
werror ("Warning: Detected repeated missed lookup calls.\n%s\n", |
describe_backtrace (backtrace())); |
#endif |
ctx[key] = start; |
} |
else |
all_ctx[cache_name] = ([key: start]); |
} |
|
else { |
#ifdef DEBUG |
werror ("Warning: Got call from %O without cache context mapping.\n%s\n", |
Thread.this_thread(), describe_backtrace (backtrace())); |
#endif |
} |
} |
|
void got_miss (string cache_name, mixed key, mapping cache_context) |
{ |
|
account_miss (cache_name); |
save_start_hrtime (cache_name, key, cache_context); |
} |
|
void got_hit (string cache_name, CacheEntry entry, mapping cache_context) |
{ |
account_hit (cache_name, entry); |
|
|
|
save_start_hrtime (cache_name, entry->key, cache_context); |
} |
|
protected int entry_create_hrtime (string cache_name, mixed key, |
mapping cache_context) |
{ |
if (mapping all_ctx = cache_context || cache_contexts->get()) |
if (mapping(mixed:int) ctx = all_ctx[cache_name]) { |
int start = m_delete (ctx, key); |
if (!zero_type (start)) { |
int duration = (gettime_func() - all_ctx[0]) - start; |
#if 0 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
ASSERT_IF_DEBUG (duration >= 0 , |
duration, start, all_ctx); |
#endif |
if (duration < 0) |
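            // Clamp negative durations to zero - they can occur,
            // presumably because the time measurements aren't
            // guaranteed to be monotonic here.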
duration = 0; |
all_ctx[0] += duration; |
return duration; |
} |
} |
#ifdef DEBUG |
|
|
|
werror ("Warning: No preceding lookup for this key - " |
"cannot determine entry creation time.\n%s\n", |
describe_backtrace (backtrace())); |
#endif |
return 0; |
} |
|
protected mapping(string:array(float|int)) mean_costs = ([]); |
|
|
|
|
|
|
|
|
|
|
float calc_value (string cache_name, CacheEntry entry, |
int old_entry, mapping cache_context) |
{ |
if (int hrtime = !old_entry && |
entry_create_hrtime (cache_name, entry->key, cache_context)) { |
float cost = entry->cost = (float) hrtime; |
|
if (array(float|int) mean_entry = mean_costs[cache_name]) { |
[float mean_cost, int mean_count] = mean_entry; |
mean_entry[0] = (mean_count * mean_cost + cost) / (mean_count + 1); |
if (mean_count < 1000) mean_entry[1] = mean_count + 1; |
} |
else |
mean_costs[cache_name] = ({cost, 1}); |
|
return cost / entry->size; |
} |
|
|
|
|
if (array(float|int) mean_entry = mean_costs[cache_name]) |
return mean_entry[0] / entry->size; |
else |
return 0.0; |
} |
|
void evict (int max_size) |
{ |
::evict (max_size); |
if (!max_size) mean_costs = ([]); |
} |
|
string format_cost (float cost) |
{ |
return Roxen.format_hrtime ((int) cost); |
} |
} |
|
class CM_GDS_CPUTime |
{ |
inherit CM_GDS_Time; |
|
constant name = "GDS(cpu time)"; |
string doc = #"\ |
This cache manager implements <a |
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> |
with the cost of each entry determined by the CPU time it took to |
create it. The CPU time implementation is " + |
Roxen.html_encode_string (System.CPU_TIME_IMPLEMENTATION) + |
" which is " + |
(System.CPU_TIME_IS_THREAD_LOCAL ? "thread local" : "not thread local") + |
" and has a resolution of " + |
(System.CPU_TIME_RESOLUTION / 1e6) + " ms."; |
|
protected int gettime_func() |
{ |
return gethrvtime(); |
} |
} |
|
protected CM_GDS_CPUTime cm_gds_cputime = CM_GDS_CPUTime(); |
|
class CM_GDS_RealTime |
{ |
inherit CM_GDS_Time; |
|
constant name = "GDS(real time)"; |
string doc = #"\ |
This cache manager implements <a |
href='http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.30.7285'>GreedyDual-Size</a> |
with the cost of each entry determined by the real (wall) time it took |
to create it. The real time implementation is " + |
Roxen.html_encode_string (System.REAL_TIME_IMPLEMENTATION) + |
" which is " + |
(System.REAL_TIME_IS_MONOTONIC ? "monotonic" : "not monotonic") + |
" and has a resolution of " + |
(System.REAL_TIME_RESOLUTION / 1e6) + " ms."; |
|
protected int gettime_func() |
{ |
|
|
|
|
|
|
return gethrtime() - Pike.implicit_gc_real_time(); |
} |
} |
|
protected CM_GDS_RealTime cm_gds_realtime = CM_GDS_RealTime(); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
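// Maps the cache manager type names that may be given to
// cache_register() to the manager instances implementing them.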
mapping(string:CacheManager) cache_manager_prefs = ([ |
#if 0 |
|
|
|
|
|
|
|
|
"default": (System.CPU_TIME_IS_THREAD_LOCAL != "yes" || |
System.CPU_TIME_RESOLUTION > 10000 ? |
|
|
cm_gds_realtime : |
cm_gds_cputime), |
#else |
"default": cm_gds_realtime, |
#endif |
"no_cpu_timings": cm_gds_realtime, |
"no_thread_timings": cm_gds_realtime, |
"no_timings": cm_gds_1, |
]); |
|
|
array(CacheManager) cache_managers = |
Array.uniq (({cache_manager_prefs->default, |
cache_manager_prefs->no_cpu_timings, |
cache_manager_prefs->no_thread_timings, |
cache_manager_prefs->no_timings, |
})); |
|
protected Thread.Mutex cache_mgmt_mutex = Thread.Mutex(); |
|
|
|
protected int cache_start_time = time() - 1; |
protected int last_cache_size_balance = time() - 1; |
|
|
|
|
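// Redistributes total_size_limit between the cache managers based on
// their decaying hit rate and add rate statistics. Runs every
// rebalance_interval seconds and from set_total_size_limit().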
protected void update_cache_size_balance() |
|
|
{ |
Thread.MutexKey lock = cache_mgmt_mutex->lock(); |
|
int now = time(); |
mapping(CacheManager:int) mgr_used = ([]); |
int used_size; |
|
|
foreach (cache_managers, CacheManager mgr) |
mgr->update_decaying_stats (cache_start_time, last_cache_size_balance, now); |
|
if (!total_size_limit) { |
|
foreach (cache_managers, CacheManager mgr) { |
mgr->total_size_limit = 0; |
mgr->update_size_limit(); |
} |
} |
|
else { |
foreach (cache_managers, CacheManager mgr) |
used_size += mgr_used[mgr] = |
mgr->size + (mgr->total_size_limit - mgr->size_limit); |
|
if (used_size < (int) (0.9 * total_size_limit)) { |
|
|
|
|
|
int extra = total_size_limit - used_size; |
CACHE_WERR ("Rebalance: %db under the limit - free growth " |
"(overshoot %db).\n", extra, |
extra * sizeof (cache_managers) + used_size - |
total_size_limit); |
|
foreach (cache_managers, CacheManager mgr) { |
mgr->total_size_limit = max (rebalance_min_size, mgr_used[mgr] + extra); |
mgr->update_size_limit(); |
} |
} |
|
else { |
int reserved_size; |
#ifdef CACHE_DEBUG |
int overshoot_size; |
#endif |
mapping(CacheManager:int) mgr_reserved_size = ([]); |
mapping(CacheManager:float) mgr_weights = ([]); |
float total_weight = 0.0; |
|
foreach (cache_managers, CacheManager mgr) { |
int reserved = (int) (rebalance_keep_factor * mgr_used[mgr]); |
if (reserved < rebalance_min_size) { |
|
|
|
|
reserved = mgr_used[mgr]; |
#ifdef CACHE_DEBUG |
overshoot_size += rebalance_min_size - reserved; |
#endif |
} |
reserved_size += mgr_reserved_size[mgr] = reserved; |
|
float lookups = mgr->has_cost ? |
mgr->cost_hits + mgr->cost_misses : mgr->hits + mgr->misses; |
float hit_rate_per_byte = lookups != 0.0 && mgr_used[mgr] ? |
mgr->hits / lookups / mgr_used[mgr] : 0.0; |
|
|
|
|
float weight = max (mgr->add_rate * hit_rate_per_byte, 0.0); |
mgr_weights[mgr] = weight; |
total_weight += weight; |
|
CACHE_WERR ("Rebalance weight %s: Reserved %db, " |
"add rate %g, hr %g, hr/b %g, weight %g.\n", |
mgr->name, reserved, mgr->add_rate, |
lookups != 0.0 ? mgr->hits / lookups : 0.0, |
hit_rate_per_byte, weight); |
} |
|
if (total_weight > 0.0) { |
|
|
|
if (reserved_size > total_size_limit) { |
|
|
|
|
|
|
|
|
|
|
int total_above_min = total_size_limit; |
foreach (cache_managers, CacheManager mgr) { |
int reserved = min (mgr_used[mgr], rebalance_min_size); |
used_size -= reserved; |
mgr_used[mgr] -= reserved; |
total_above_min -= reserved; |
} |
|
if (used_size > 0) { |
CACHE_WERR ("Rebalance: %db over the limit - shrinking linearly " |
"(overshoot %db).\n", used_size - total_above_min, |
rebalance_min_size * sizeof (cache_managers) + |
total_above_min - total_size_limit); |
|
foreach (cache_managers, CacheManager mgr) { |
int new_size = rebalance_min_size + |
(int) (((float) mgr_used[mgr] / used_size) * total_above_min); |
CACHE_WERR ("Rebalance shrink %s: From %db to %db - diff %db.\n", |
mgr->name, mgr->total_size_limit, new_size, |
new_size - mgr->total_size_limit); |
mgr->total_size_limit = new_size; |
mgr->update_size_limit(); |
} |
} |
} |
|
else { |
int size_for_rebalance = total_size_limit - reserved_size; |
CACHE_WERR ("Rebalance: %db for rebalance (reserved %db, " |
"overshoot %db)\n", size_for_rebalance, reserved_size, |
overshoot_size); |
|
foreach (cache_managers, CacheManager mgr) { |
int new_size = max (mgr_reserved_size[mgr] + |
(int) ((mgr_weights[mgr] / total_weight) * |
size_for_rebalance), |
rebalance_min_size); |
CACHE_WERR ("Rebalance on weight %s: From %db to %db - diff %db.\n", |
mgr->name, mgr->total_size_limit, new_size, |
new_size - mgr->total_size_limit); |
mgr->total_size_limit = new_size; |
mgr->update_size_limit(); |
} |
} |
} |
} |
} |
|
#ifdef DEBUG |
foreach (cache_managers, CacheManager mgr) |
ASSERT_IF_DEBUG (mgr->total_size_limit == 0 || |
mgr->total_size_limit >= rebalance_min_size, |
mgr, mgr->total_size_limit); |
#endif |
|
last_cache_size_balance = now; |
} |
|
protected void periodic_update_cache_size_balance() |
{ |
update_cache_size_balance(); |
roxenp()->background_run (rebalance_interval, |
periodic_update_cache_size_balance); |
} |
|
#ifdef DEBUG_CACHE_SIZES |
protected int cmp_sizeof_cache_entry (string cache_name, CacheEntry entry) |
{ |
int res; |
mixed data = entry->data; |
mapping opts = (["block_strings": 1, |
#if DEBUG_CACHE_SIZES > 1 |
"collect_internals": 1, |
#endif |
]); |
if (function(int|mapping:int) cm_cb = |
objectp (data) && data->cache_count_memory) |
res = cm_cb (opts) + Pike.count_memory (-1, entry, entry->key); |
else |
res = Pike.count_memory (opts, entry, entry->key, data); |
#if DEBUG_CACHE_SIZES > 1 |
werror ("Internals counted for %O / %O: ({\n%{%s,\n%}})\n", |
cache_name, entry, |
sort (map (opts->collect_internals, |
lambda (mixed m) {return sprintf ("%O", m);}))); |
#endif |
return res; |
} |
#endif |
|
protected mapping(string:CacheManager) caches = ([]); |
|
|
mapping(string:CacheManager) cache_list() |
|
|
{ |
return caches + ([]); |
} |
|
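// Registers the named cache and returns its cache manager. manager may
// be a CacheManager instance or one of the type names in
// cache_manager_prefs; if omitted the default manager is used. If the
// cache is already registered its existing manager is returned and the
// manager argument is ignored.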
CacheManager cache_register (string cache_name, |
void|string|CacheManager manager) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{ |
Thread.MutexKey lock = |
cache_mgmt_mutex->lock (2); |
|
if (CacheManager mgr = caches[cache_name]) |
return mgr; |
|
if (!manager) manager = cache_manager_prefs->default; |
else if (stringp (manager)) { |
string cache_type = manager; |
manager = cache_manager_prefs[cache_type]; |
if (!manager) error ("Unknown cache manager type %O requested.\n", |
cache_type); |
} |
|
caches[cache_name] = manager; |
manager->stats[cache_name] = CacheStats(); |
manager->lookup[cache_name] = ([]); |
return manager; |
} |
|
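// Unregisters the named cache, removing all its entries and statistics
// from its cache manager.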
void cache_unregister (string cache_name) |
|
|
{ |
Thread.MutexKey lock = cache_mgmt_mutex->lock(); |
|
|
if (CacheManager mgr = m_delete (caches, cache_name)) { |
mapping(mixed:CacheEntry) lm = m_delete (mgr->lookup, cache_name); |
CacheStats cs = m_delete (mgr->stats, cache_name); |
|
mgr->size -= cs->size; |
|
destruct (lock); |
foreach (lm;; CacheEntry entry) |
mgr->remove_entry (cache_name, entry); |
} |
} |
|
CacheManager cache_get_manager (string cache_name) |
|
|
{ |
return caches[cache_name]; |
} |
|
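// Moves the named cache with all its entries to another cache manager.
// Each entry is recreated in the new manager's CacheEntry class, with
// its recorded size adjusted for the difference in entry object
// overhead.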
void cache_change_manager (string cache_name, CacheManager manager) |
|
|
|
|
{ |
Thread.MutexKey lock = cache_mgmt_mutex->lock(); |
|
|
CacheManager old_mgr = m_delete (caches, cache_name); |
if (old_mgr == manager) |
caches[cache_name] = manager; |
|
|
else { |
mapping(mixed:CacheEntry) old_lm = m_delete (old_mgr->lookup, cache_name); |
CacheStats old_cs = m_delete (old_mgr->stats, cache_name); |
|
old_mgr->size -= old_cs->size; |
cache_register (cache_name, manager); |
|
|
destruct (lock); |
int entry_size_diff = (Pike.count_memory (0, manager->CacheEntry (0, 0)) - |
Pike.count_memory (0, old_mgr->CacheEntry (0, 0))); |
foreach (old_lm; mixed key; CacheEntry old_ent) { |
old_mgr->remove_entry (cache_name, old_ent); |
CacheEntry new_ent = manager->CacheEntry (key, old_ent->data); |
new_ent->size = old_ent->size + entry_size_diff; |
manager->add_entry (cache_name, new_ent, 1, 0); |
} |
manager->update_size_limit(); |
} |
} |
|
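// Empties the RAM cache: evicts everything handled by the manager of
// the named cache, or by all cache managers if cache_name is left out.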
void cache_expire (void|string cache_name) |
|
|
{ |
|
|
foreach (cache_name ? ({cache_name}) : indices (caches), string cn) { |
CACHE_WERR ("Emptying cache %O.\n", cn); |
if (CacheManager mgr = caches[cn]) { |
mgr->evict (0); |
mgr->update_size_limit(); |
} |
} |
} |
|
void flush_memory_cache (void|string cache_name) {cache_expire (cache_name);} |
|
void cache_clear_deltas() |
{ |
cache_managers->clear_cache_context(); |
} |
|
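// Looks up key in the named cache. Returns the cached data, or zero if
// there is no entry, if the entry has timed out, or if its data has
// been destructed. The hit or miss is accounted with the cache
// manager.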
mixed cache_lookup (string cache_name, mixed key, void|mapping cache_context) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{ |
CacheManager mgr = caches[cache_name] || cache_register (cache_name); |
|
if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) |
if (CacheEntry entry = lm[key]) { |
|
      if ((entry->timeout && entry->timeout <= time (1)) || !entry->data) {
mgr->remove_entry (cache_name, entry); |
mgr->got_miss (cache_name, key, cache_context); |
MORE_CACHE_WERR ("cache_lookup (%O, %s): %s\n", |
cache_name, RXML.utils.format_short (key), |
entry->data ? "Timed out" : "Destructed"); |
return 0; |
} |
|
mgr->got_hit (cache_name, entry, cache_context); |
MORE_CACHE_WERR ("cache_lookup (%O, %s): Hit\n", |
cache_name, RXML.utils.format_short (key)); |
return entry->data; |
} |
|
mgr->got_miss (cache_name, key, cache_context); |
MORE_CACHE_WERR ("cache_lookup (%O, %s): Miss\n", |
cache_name, RXML.utils.format_short (key)); |
return 0; |
} |
|
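// Like cache_lookup(), but does not account a hit or miss with the
// cache manager (and thus does not affect the entry's priority).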
mixed cache_peek (string cache_name, mixed key) |
|
|
|
{ |
if (CacheManager mgr = caches[cache_name]) |
if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) |
if (CacheEntry entry = lm[key]) { |
|
if (entry->timeout && entry->timeout <= time (1)) { |
mgr->remove_entry (cache_name, entry); |
MORE_CACHE_WERR ("cache_peek (%O, %s): Timed out\n", |
cache_name, RXML.utils.format_short (key)); |
return 0; |
} |
|
MORE_CACHE_WERR ("cache_peek (%O, %s): Entry found\n", |
cache_name, RXML.utils.format_short (key)); |
return entry->data; |
} |
|
MORE_CACHE_WERR ("cache_peek (%O, %s): Entry not found\n", |
cache_name, RXML.utils.format_short (key)); |
return 0; |
} |
|
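// Stores data in the named cache under key and returns data. A nonzero
// timeout is the entry lifetime in seconds. cache_context is either
// the context mapping used for the creation time measurement, or the
// integer 1 to flag that the entry replaces an existing one (passed on
// as old_entry to the cache manager).
//
// A typical lookup-or-compute pattern (hypothetical cache name and
// helper function):
//
//   if (mixed val = cache_lookup ("my_cache", key))
//     return val;
//   return cache_set ("my_cache", key, compute_value (key), 300);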
mixed cache_set (string cache_name, mixed key, mixed data, void|int timeout, |
void|mapping|int(1..1) cache_context) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{ |
ASSERT_IF_DEBUG (data); |
|
CacheManager mgr = caches[cache_name] || cache_register (cache_name); |
CacheEntry new_entry = mgr->CacheEntry (key, data); |
|
|
|
|
|
|
|
#ifdef DEBUG_COUNT_MEM |
mapping opts = (["lookahead": DEBUG_COUNT_MEM - 1, |
"collect_stats": 1, |
"collect_direct_externals": 1, |
]); |
float t = gauge { |
#else |
#define opts 0 |
#endif |
|
if (function(int|mapping:int) cm_cb = |
objectp (data) && data->cache_count_memory) |
new_entry->size = cm_cb (opts) + Pike.count_memory (-1, new_entry, key); |
else |
new_entry->size = Pike.count_memory (opts, new_entry, key, data); |
|
#ifdef DEBUG_COUNT_MEM |
}; |
werror ("%O: la %d size %d time %g int %d cyc %d ext %d vis %d revis %d " |
"rnd %d wqa %d\n", |
new_entry, opts->lookahead, opts->size, t, opts->internal, |
opts->cyclic, opts->external, opts->visits, opts->revisits, |
opts->rounds, opts->work_queue_alloc); |
|
#if 0 |
if (opts->external) { |
opts->collect_direct_externals = 1; |
|
if (opts->lookahead < 1) opts->lookahead = 1; |
|
    int res;
    if (function(int|mapping:int) cm_cb =
        objectp (data) && data->cache_count_memory)
      res = cm_cb (opts) + Pike.count_memory (-1, new_entry, key);
    else
      res = Pike.count_memory (opts, new_entry, key, data);
|
array exts = opts->collect_direct_externals; |
werror ("Externals found using lookahead %d: %O\n", |
opts->lookahead, exts); |
#if 0 |
foreach (exts, mixed ext) |
if (objectp (ext) && ext->locate_my_ext_refs) { |
werror ("Refs to %O:\n", ext); |
_locate_references (ext); |
} |
#endif |
} |
#endif |
|
#endif // DEBUG_COUNT_MEM |
#undef opts |
|
#ifdef DEBUG_CACHE_SIZES |
new_entry->cmp_size = cmp_sizeof_cache_entry (cache_name, new_entry); |
#endif |
|
if (timeout) |
new_entry->timeout = time (1) + timeout; |
|
mgr->add_entry (cache_name, new_entry, |
intp (cache_context) && cache_context, |
mappingp (cache_context) && cache_context); |
|
MORE_CACHE_WERR ("cache_set (%O, %s, %s, %O): %O\n", |
cache_name, RXML.utils.format_short (key), |
sprintf (objectp (data) ? "%O" : "%t", data), timeout, |
new_entry); |
|
return data; |
} |
|
void cache_remove (string cache_name, mixed key) |
|
|
|
|
|
{ |
MORE_CACHE_WERR ("cache_remove (%O, %O)\n", cache_name, key); |
if (CacheManager mgr = caches[cache_name]) |
if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) |
if (CacheEntry entry = lm[key]) |
mgr->remove_entry (cache_name, entry); |
} |
|
mapping(mixed:CacheEntry) cache_entries (string cache_name) |
|
|
{ |
if (CacheManager mgr = caches[cache_name]) |
if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) |
return lm; |
return ([]); |
} |
|
array cache_indices(string|void cache_name) |
|
{ |
if (!cache_name) |
return indices (caches); |
else |
return indices (cache_entries (cache_name)); |
} |
|
mapping(CacheManager:mapping(string:CacheStats)) cache_stats() |
|
|
|
|
{ |
mapping(CacheManager:mapping(string:CacheStats)) res = ([]); |
foreach (cache_managers, CacheManager mgr) |
res[mgr] = mgr->stats; |
return res; |
} |
|
|
|
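// Decaying averages over the periodic RAM cache garbage collector
// runs, maintained by cache_clean(). gc_stats_period is the averaging
// period in seconds.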
constant gc_stats_period = 60 * 60; |
float sum_gc_runs = 0.0, sum_gc_time = 0.0; |
float sum_destruct_garbage_size = 0.0; |
float sum_timeout_garbage_size = 0.0; |
float avg_destruct_garbage_ratio = 0.0; |
float avg_timeout_garbage_ratio = 0.0; |
|
int last_gc_run; |
|
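// The periodic RAM cache garbage collector: walks all caches, removes
// entries whose data has been destructed or whose timeout has passed,
// updates the decaying GC statistics above and reschedules itself via
// background_run().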
protected void cache_clean() |
|
{ |
int now = time (1); |
int vt = gethrvtime(), t = gethrtime(); |
int total_size, destr_garb_size, timeout_garb_size; |
|
CACHE_WERR ("Starting RAM cache cleanup.\n"); |
|
|
|
|
|
|
#ifdef DEBUG_CACHE_SIZES |
mapping(CacheManager:int) cache_sizes = ([]); |
#endif |
|
foreach (caches; string cache_name; CacheManager mgr) { |
#ifdef DEBUG_CACHE_SIZES |
int cache_count, cache_size; |
#endif |
if (mapping(mixed:CacheEntry) lm = mgr->lookup[cache_name]) { |
foreach (lm;; CacheEntry entry) { |
|
if (!entry->data) { |
MORE_CACHE_WERR ("%s: Removing destructed entry %O\n", |
cache_name, entry); |
destr_garb_size += entry->size; |
mgr->remove_entry (cache_name, entry); |
} |
|
else if (entry->timeout && entry->timeout <= now) { |
MORE_CACHE_WERR ("%s: Removing timed out entry %O\n", |
cache_name, entry); |
timeout_garb_size += entry->size; |
mgr->remove_entry (cache_name, entry); |
} |
|
else { |
#ifdef DEBUG_CACHE_SIZES |
cache_count++; |
cache_size += entry->size; |
int size = cmp_sizeof_cache_entry (cache_name, entry); |
if (size != entry->cmp_size) { |
|
|
|
|
|
werror ("Size diff in %O: Is %d, was %d in cache_set() - " |
"diff %d: %O\n", |
cache_name, size, entry->cmp_size, |
size - entry->cmp_size, entry); |
|
entry->cmp_size = size; |
} |
#endif |
} |
} |
} |
|
#ifdef DEBUG_CACHE_SIZES |
mapping(string:int)|CacheStats st = mgr->stats[cache_name] || ([]); |
|
if (cache_count != st->count) |
werror ("Entry count difference for %O: " |
"Have %d, expected %d - diff %d.\n", |
cache_name, cache_count, st->count, cache_count - st->count); |
if (cache_size != st->size) |
werror ("Entry size difference for %O: " |
"Have %d, expected %d - diff %d.\n", |
cache_name, cache_size, st->size, cache_size - st->size); |
cache_sizes[mgr] += st->size; |
#endif |
} |
|
foreach (cache_managers, CacheManager mgr) { |
mgr->after_gc(); |
total_size += mgr->size; |
|
#ifdef DEBUG_CACHE_SIZES |
|
if (cache_sizes[mgr] != mgr->size) |
werror ("Cache size difference for %O: " |
"Have %d, expected %d - diff %d.\n", |
mgr, cache_sizes[mgr], mgr->size, cache_sizes[mgr] - mgr->size); |
#endif |
} |
|
vt = gethrvtime() - vt; |
t = gethrtime() - t; |
CACHE_WERR ("Finished RAM cache cleanup: " |
"%db stale, %db timed out, took %s.\n", |
destr_garb_size, timeout_garb_size, |
Roxen.format_hrtime (vt || t)); |
|
int stat_last_period = now - last_gc_run; |
int stat_tot_period = now - cache_start_time; |
int startup = stat_tot_period < gc_stats_period; |
if (!startup) stat_tot_period = gc_stats_period; |
|
if (stat_last_period > stat_tot_period) { |
|
|
|
sum_gc_time = (float) (vt || t); |
sum_gc_runs = 1.0; |
sum_destruct_garbage_size = (float) destr_garb_size; |
sum_timeout_garbage_size = (float) timeout_garb_size; |
if (total_size) { |
avg_destruct_garbage_ratio = (float) destr_garb_size / total_size; |
avg_timeout_garbage_ratio = (float) timeout_garb_size / total_size; |
} |
} |
|
else { |
float our_weight = (float) stat_last_period / stat_tot_period; |
float old_weight = 1.0 - our_weight; |
|
if (startup) { |
sum_gc_runs += 1.0; |
sum_gc_time += (float) (vt || t); |
sum_destruct_garbage_size += (float) destr_garb_size; |
sum_timeout_garbage_size += (float) timeout_garb_size; |
} |
|
else { |
sum_gc_runs = old_weight * sum_gc_runs + 1.0; |
sum_gc_time = old_weight * sum_gc_time + (float) (vt || t); |
sum_destruct_garbage_size = (old_weight * sum_destruct_garbage_size + |
(float) destr_garb_size); |
sum_timeout_garbage_size = (old_weight * sum_timeout_garbage_size + |
(float) timeout_garb_size); |
} |
|
if (total_size) { |
avg_destruct_garbage_ratio = (old_weight * avg_destruct_garbage_ratio + |
our_weight * destr_garb_size / total_size); |
avg_timeout_garbage_ratio = (old_weight * avg_timeout_garbage_ratio + |
our_weight * timeout_garb_size / total_size); |
} |
} |
|
last_gc_run = now; |
|
if (Configuration admin_config = roxenp()->get_admin_configuration()) |
admin_config->log_event ("roxen", "ram-cache-gc", 0, ([ |
"handle-cputime": vt, |
"handle-time": t, |
"stale-size": destr_garb_size, |
"timed-out-size": timeout_garb_size, |
])); |
|
|
roxenp()->background_run (roxenp()->query ("mem_cache_gc_2") || 60, |
cache_clean); |
} |
|
|
|
|
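// The non-garbing cache: a plain two-level mapping keyed on cache id
// and key. Its entries are neither garbage collected nor size limited;
// they stay until explicitly removed or flushed.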
private mapping(string:mapping(string:mixed)) nongc_cache; |
|
|
|
|
void nongarbing_cache_set(string cache_id, string key, mixed value) { |
if(nongc_cache[cache_id]) |
nongc_cache[cache_id][key] = value; |
else |
nongc_cache[cache_id] = ([ key:value ]); |
} |
|
|
|
mixed nongarbing_cache_lookup(string cache_id, string key) { |
return nongc_cache[cache_id]?nongc_cache[cache_id][key]:([])[0]; |
} |
|
|
void nongarbing_cache_remove(string cache_id, string key) { |
if(nongc_cache[cache_id]) m_delete(nongc_cache[cache_id], key); |
} |
|
|
void nongarbing_cache_flush(string cache_id) { |
m_delete(nongc_cache, cache_id); |
} |
|
mapping(string:array(int)) ngc_status() { |
mapping(string:array(int)) res = ([]); |
|
foreach(nongc_cache; string cache; mapping(string:mixed) cachemap) { |
int size = Pike.count_memory (0, cachemap); |
res[cache] = ({ sizeof(cachemap), size}); |
} |
|
return res; |
} |
|
|
|
|
#ifndef SESSION_BUCKETS |
# define SESSION_BUCKETS 4 |
#endif |
#ifndef SESSION_SHIFT_TIME |
# define SESSION_SHIFT_TIME 15*60 |
#endif |
|
|
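// The session cache. Sessions are kept in SESSION_BUCKETS generations
// of mappings; every SESSION_SHIFT_TIME seconds the oldest bucket is
// retired - expired sessions in it are dropped and persistent,
// non-object sessions that haven't been touched recently are written
// to the session_cache SQL table.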
private mapping(string:int) session_persistence; |
|
private array(mapping(string:mixed)) session_buckets; |
|
private function(string:Sql.Sql) db; |
|
private int max_persistence; |
|
|
private void store_session(string id, mixed data, int t) { |
data = encode_value(data); |
db("local")->query("REPLACE INTO session_cache VALUES (%s," + t + ",%s)", |
id, data); |
} |
|
|
|
private void session_cache_handler() { |
int t=time(1); |
if(max_persistence>t) { |
|
clean: |
foreach(session_buckets[-1]; string id; mixed data) { |
if(session_persistence[id]<t) { |
m_delete(session_buckets[-1], id); |
m_delete(session_persistence, id); |
continue; |
} |
for(int i; i<SESSION_BUCKETS-2; i++) |
if(session_buckets[i][id]) { |
continue clean; |
} |
if(objectp(data)) { |
m_delete(session_buckets[-1], id); |
m_delete(session_persistence, id); |
continue; |
} |
store_session(id, data, session_persistence[id]); |
m_delete(session_buckets[-1], id); |
m_delete(session_persistence, id); |
} |
} |
|
session_buckets = ({ ([]) }) + session_buckets[..SESSION_BUCKETS-2]; |
roxenp()->background_run(SESSION_SHIFT_TIME, session_cache_handler); |
} |
|
|
|
private void session_cache_destruct() { |
int t=time(1); |
if(max_persistence>t) { |
report_notice("Synchronizing session cache"); |
foreach(session_buckets, mapping(string:mixed) session_bucket) |
foreach(session_bucket; string id; mixed data) |
if(session_persistence[id]>t) { |
store_session(id, data, session_persistence[id]); |
m_delete(session_persistence, id); |
} |
} |
report_notice("Session cache synchronized\n"); |
} |
|
|
|
|
|
|
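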
void clear_session(string id) { |
m_delete(session_persistence, id); |
foreach(session_buckets, mapping bucket) |
m_delete(bucket, id); |
db("local")->query("DELETE FROM session_cache WHERE id=%s", id); |
} |
|
|
|
|
|
|
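// Returns the session data for id, first from the RAM buckets and
// otherwise from the session_cache table. A RAM hit refreshes the
// session into the newest bucket. Returns UNDEFINED if the session is
// unknown.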
mixed get_session_data(string id) { |
mixed data; |
foreach(session_buckets, mapping bucket) |
if(data=bucket[id]) { |
session_buckets[0][id] = data; |
return data; |
} |
data = db("local")->query("SELECT data FROM session_cache WHERE id=%s", id); |
if(sizeof([array]data) && |
!catch(data=decode_value( ([array(mapping(string:string))]data)[0]->data ))) |
return data; |
return ([])[0]; |
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
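// Stores data as session data and returns the session id (a unique id
// is generated if none is supplied). persistence is the absolute time
// until which the session should be kept; if store is set and
// persistence is nonzero the session is also written to the database
// right away.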
string set_session_data(mixed data, void|string id, void|int persistence, |
void|int(0..1) store) { |
if(!id) id = ([function(void:string)]roxenp()->create_unique_id)(); |
session_persistence[id] = persistence; |
session_buckets[0][id] = data; |
max_persistence = max(max_persistence, persistence); |
if(store && persistence) store_session(id, data, persistence); |
return id; |
} |
|
|
private void setup_tables() { |
db("local")->query("CREATE TABLE IF NOT EXISTS session_cache (" |
"id CHAR(32) NOT NULL PRIMARY KEY, " |
"persistence INT UNSIGNED NOT NULL DEFAULT 0, " |
"data BLOB NOT NULL)"); |
master()->resolv("DBManager.is_module_table") |
( 0, "local", "session_cache", "Used by the session manager" ); |
} |
|
|
void init_session_cache() { |
db = (([function(string:function(string:object(Sql.Sql)))]master()->resolv) |
("DBManager.cached_get")); |
setup_tables(); |
} |
|
void init_call_outs() |
{ |
roxenp()->background_run(60, cache_clean); |
roxenp()->background_run (0, periodic_update_cache_size_balance); |
roxenp()->background_run(SESSION_SHIFT_TIME, session_cache_handler); |
|
CACHE_WERR("Cache garb call outs installed.\n"); |
} |
|
void create() |
{ |
add_constant( "cache", this_object() ); |
|
nongc_cache = ([ ]); |
|
session_buckets = ({ ([]) }) * SESSION_BUCKETS; |
session_persistence = ([]); |
|
CACHE_WERR("Now online.\n"); |
} |
|
void destroy() { |
session_cache_destruct(); |
return; |
} |
|
|