|
|
|
|
#include <module_constants.h> |
#include <module.h> |
#include <request_trace.h> |
|
constant __pragma_save_parent__ = 1; |
|
|
constant pike_cycle_depth = 0; |
|
inherit "basic_defvar"; |
mapping(string:array(int)) error_log=([]); |
|
constant is_module = 1; |
|
|
|
constant module_unique = 1; |
|
constant webdav_opaque = 0; |
|
private Configuration _my_configuration; |
private string _module_local_identifier; |
private string _module_identifier = |
lambda() { |
mixed init_info = roxen->bootstrap_info->get(); |
if (arrayp (init_info)) { |
[_my_configuration, _module_local_identifier] = init_info; |
return _my_configuration->name + "/" + _module_local_identifier; |
} |
#ifdef DEBUG |
else |
error ("Got invalid bootstrap info for module: %O\n", init_info); |
#endif |
}(); |
protected mapping _api_functions = ([]); |
|
class ModuleJSONLogger { |
inherit Logger.BaseJSONLogger; |
|
void create(object parent_config) { |
string name = combine_path_unix(parent_config->json_logger->logger_name, |
module_local_id()); |
::create(name, UNDEFINED, parent_config->json_logger); |
} |
} |
|
|
private ModuleJSONLogger json_logger; |
|
string|array(string) module_creator; |
string module_url; |
RXML.TagSet module_tag_set; |
|
void report_fatal(sprintf_format fmt, sprintf_args ... args) |
{ predef::report_fatal(fmt, @args); } |
void report_error(sprintf_format fmt, sprintf_args ... args) |
{ predef::report_error(fmt, @args); } |
void report_warning(sprintf_format fmt, sprintf_args ... args) |
{ predef::report_warning(fmt, @args); } |
void report_notice(sprintf_format fmt, sprintf_args ... args) |
{ predef::report_notice(fmt, @args); } |
void report_debug(sprintf_format fmt, sprintf_args ... args) |
{ predef::report_debug(fmt, @args); } |
void report_warning_sparsely (sprintf_format fmt, sprintf_args ... args) |
{predef::report_warning_sparsely (fmt, @args);} |
void report_error_sparsely (sprintf_format fmt, sprintf_args ... args) |
{predef::report_error_sparsely (fmt, @args);} |
|
void log_event (string facility, string action, string resource, |
void|mapping(string:mixed) info) |
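//! Logs an event through the configuration's log_event(). If facility
//! is zero, the module's local identifier is used as the facility.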
{ |
_my_configuration->log_event (facility || _module_local_identifier, |
action, resource, info); |
} |
|
void json_log_trace(string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.TRACE); } |
void json_log_debug(string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.DBG); } |
void json_log_info (string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.INFO); } |
void json_log_warn (string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.WARN); } |
void json_log_error(string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.ERROR); } |
void json_log_fatal(string|mapping log_msg) { json_log_with_level(log_msg, Logger.BaseJSONLogger.FATAL); } |
|
|
void json_log_with_level(string|mapping log_msg, int level) { |
if (stringp(log_msg)) { |
log_msg = ([ |
"msg" : log_msg, |
]); |
} |
log_msg->level = level; |
json_log(log_msg); |
} |
|
|
void json_log(string|mapping log_msg) { |
if (stringp(log_msg)) { |
log_msg = ([ |
"msg" : log_msg, |
]); |
} |
|
if (json_logger && functionp(json_logger->log)) { |
json_logger->log(log_msg); |
} |
} |
|
string module_identifier() |
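//! Returns a string that uniquely identifies this module instance:
//! the configuration name followed by "/" and the local module
//! identifier.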
{ |
return _module_identifier; |
} |
|
string module_local_id() |
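//! Returns the module identifier that is local to the configuration
//! (i.e. without the configuration name prefix).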
{ |
return _module_local_identifier; |
} |
|
RoxenModule this_module() |
{ |
return this_object(); |
} |
|
|
DECLARE_OBJ_COUNT; |
|
|
string _sprintf() |
{ |
return sprintf ("RoxenModule(%s)" + OBJ_COUNT, _module_identifier || "?"); |
} |
|
array register_module() |
{ |
return ({ |
this_object()->module_type, |
this_object()->module_name, |
this_object()->module_doc, |
0, |
module_unique, |
this_object()->module_locked, |
this_object()->module_counter, |
}); |
} |
|
string fix_cvs(string from) |
{ |
from = replace(from, ({ "$", "Id: "," Exp $" }), ({"","",""})); |
sscanf(from, "%*s,v %s", from); |
return replace(from,"/","-"); |
} |
|
int module_dependencies(Configuration configuration, |
array (string) modules, |
int|void now) |
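//! Ensures that the named modules are added to the configuration
//! (anything after a "#" in a module name is ignored). If no
//! configuration can be resolved, a warning is reported instead.
//! Always returns 1.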
{ |
modules = map (modules, |
lambda (string modname) { |
sscanf ((modname / "/")[-1], "%[^#]", modname); |
return modname; |
}); |
Configuration conf = configuration || my_configuration(); |
if (!conf) |
report_warning ("Configuration not resolved; module(s) %s that %O " |
"depend on weren't added.\n", String.implode_nicely (modules), |
module_identifier() || |
master()->describe_program(this_program) || |
"unknown module"); |
else |
conf->add_modules( modules, now ); |
return 1; |
} |
|
string file_name_and_stuff() |
{ |
return ("<b>Loaded from:</b> "+(roxen->filename(this_object()))+"<br>"+ |
(this_object()->cvs_version? |
"<b>CVS Version:</b> "+ |
fix_cvs(this_object()->cvs_version)+"\n":"")); |
} |
|
|
Configuration my_configuration() |
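//! Returns the Configuration object of the virtual server this module
//! belongs to.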
{ |
return _my_configuration; |
} |
|
final void set_configuration(Configuration c) |
{ |
if(_my_configuration && _my_configuration != c) |
error("set_configuration() called twice.\n"); |
_my_configuration = c; |
|
|
json_logger = ModuleJSONLogger(_my_configuration); |
} |
|
void set_module_creator(string|array(string) c) |
{ |
module_creator = c; |
} |
|
void set_module_url(string to) |
{ |
module_url = to; |
} |
|
void free_some_sockets_please(){} |
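//! Lifecycle callbacks, called when the module is started, when it is
//! being taken down, and when the configuration is ready to serve
//! requests, respectively. The default implementations do nothing.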
void start(int variable_save, Configuration conf, void|int newly_added) {} |
void stop() {} |
void ready_to_receive_requests (Configuration conf) {} |
|
string status() {} |
|
string info(Configuration conf) |
{ |
return (this_object()->register_module()[2]); |
} |
|
string sname( ) |
{ |
return my_configuration()->otomod[ this_object() ]; |
} |
|
ModuleInfo my_moduleinfo( ) |
|
{ |
string f = sname(); |
if( f ) return roxen.find_module( (f/"#")[0] ); |
} |
|
void save_me() |
{ |
my_configuration()->save_one( this_object() ); |
my_configuration()->module_changed( my_moduleinfo(), this_object() ); |
} |
|
void save() { save_me(); } |
string comment() { return ""; } |
|
string query_internal_location() |
|
|
{ |
if(!_my_configuration) |
error("Please do not call this function from create()!\n"); |
return _my_configuration->query_internal_location(this_object()); |
} |
|
string query_absolute_internal_location(RequestID id) |
|
|
{ |
return (id->misc->site_prefix_path || "") + query_internal_location(); |
} |
|
string query_location() |
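//! Returns the value of the module's "location" variable (the
//! mountpoint), or 0 if it has none.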
{ |
string s; |
catch{s = query("location");}; |
return s; |
} |
|
array(string) location_urls() |
|
{ |
string loc = query_location(); |
if (!loc) return ({}); |
if(!_my_configuration) |
error("Please do not call this function from create()!\n"); |
array(string) urls = copy_value(_my_configuration->query("URLs")); |
string hostname; |
if (string world_url = _my_configuration->query ("MyWorldLocation")) |
if (sizeof(world_url)) { |
Standards.URI uri = Standards.URI(world_url); |
hostname = uri->host; |
} |
if (!hostname) hostname = gethostname(); |
for (int i = 0; i < sizeof (urls); i++) |
{ |
urls[i] = (urls[i]/"#")[0]; |
if (sizeof (urls[i]/"*") == 2) |
urls[i] = replace(urls[i], "*", hostname); |
} |
return map (urls, `+, loc[1..]); |
} |
|
string location_url() |
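//! Returns a complete URL where this module is mounted, derived from
//! the configuration's registered URLs, or 0 if the module has no
//! location. The chosen URL gets a fragment "ip=..." identifying the
//! listening address.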
{ |
string short_array(array a) |
{ |
    return "({ " + (map(a, lambda(mixed o) {
return sprintf("%O", o); |
})*", ") + " })"; |
}; |
string loc = query_location(); |
if(!loc) return 0; |
if(!_my_configuration) |
error("Please do not call this function from create()!\n"); |
string hostname; |
string world_url = _my_configuration->query("MyWorldLocation"); |
if (world_url && sizeof(world_url)) { |
Standards.URI uri = Standards.URI(world_url); |
hostname = uri->host; |
} |
if(!hostname) |
hostname = gethostname(); |
#ifdef LOCATION_URL_DEBUG |
werror(" Hostname: %O\n", hostname); |
#endif |
Standards.URI candidate_uri; |
array(string) urls = |
filter(_my_configuration->registered_urls, has_prefix, "http:") + |
filter(_my_configuration->registered_urls, has_prefix, "https:"); |
foreach(urls, string url) |
{ |
#ifdef LOCATION_URL_DEBUG |
werror(" URL: %s\n", url); |
#endif |
mapping url_info = roxen.urls[url]; |
if(!url_info || !url_info->port || url_info->conf != _my_configuration) |
continue; |
Protocol p = url_info->port; |
#ifdef LOCATION_URL_DEBUG |
    werror(" Protocol: %O\n", p);
#endif |
Standards.URI uri = Standards.URI(url); |
string ip = p->ip || "127.0.0.1"; |
if (ip == "::") |
ip = "::1"; |
uri->fragment = "ip=" + ip; |
if(has_value(uri->host, "*") || has_value(uri->host, "?")) |
if(glob(uri->host, hostname)) |
uri->host = hostname; |
else { |
if(!candidate_uri) { |
candidate_uri = uri; |
candidate_uri->host = hostname; |
} |
continue; |
} |
uri->path += loc[1..]; |
return (string)uri; |
} |
if(candidate_uri) { |
report_warning("Warning: Could not find any suitable ports, continuing anyway. " |
"Please make sure that your Primary Server URL matches " |
"at least one port. Primary Server URL: %O, URLs: %s.\n", |
world_url, short_array(urls)); |
candidate_uri->path += loc[1..]; |
return (string)candidate_uri; |
} |
return 0; |
} |
|
|
multiset(string) query_provides() { return 0; } |
|
|
function(RequestID:int|mapping) query_seclevels() |
{ |
if(catch(query("_seclevels")) || (query("_seclevels") == 0)) |
return 0; |
return roxen.compile_security_pattern(query("_seclevels"),this_object()); |
} |
|
void set_status_for_path (string path, RequestID id, int status_code, |
string|void message, mixed... args) |
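//! Registers a status to be reported for path (relative to the module
//! mountpoint) on the request object. message may be a sprintf-style
//! format string which is expanded with args.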
{ |
if (sizeof (args)) message = sprintf (message, @args); |
id->set_status_for_path (query_location() + path, status_code, message); |
} |
|
Stat stat_file(string f, RequestID id){} |
array(string) find_dir(string f, RequestID id){} |
mapping(string:Stat) find_dir_stat(string f, RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "find_dir_stat(): %O", f); |
|
array(string) files = find_dir(f, id); |
mapping(string:Stat) res = ([]); |
|
foreach(files || ({}), string fname) { |
SIMPLE_TRACE_ENTER(this, "stat()'ing %O", f + "/" + fname); |
Stat st = stat_file(replace(f + "/" + fname, "//", "/"), id); |
if (st) { |
res[fname] = st; |
TRACE_LEAVE("OK"); |
} else { |
TRACE_LEAVE("No stat info"); |
} |
} |
|
TRACE_LEAVE(""); |
return(res); |
} |
|
class DefaultPropertySet |
{ |
inherit PropertySet; |
|
protected Stat stat; |
|
protected void create (string path, string abs_path, RequestID id, Stat stat) |
{ |
::create (path, abs_path, id); |
this_program::stat = stat; |
} |
|
Stat get_stat() {return stat;} |
|
protected mapping(string:string) response_headers; |
|
mapping(string:string) get_response_headers() |
{ |
if (!response_headers) { |
|
if (!id->misc->common) |
id->misc->common = ([]); |
|
RequestID sub_id = id->clone_me(); |
sub_id->misc->common = id->misc->common; |
|
sub_id->raw_url = sub_id->not_query = query_location() + path; |
if ((sub_id->raw_url != id->raw_url) && (id->raw_url != id->not_query)) { |
|
sub_id->raw_url = sub_id->not_query + |
(({ "" }) + (id->raw_url/"?")[1..]) * "?"; |
} |
sub_id->method = "HEAD"; |
|
mapping(string:mixed)|int(-1..0)|object res = find_file (path, sub_id); |
if (res == -1) res = ([]); |
else if (objectp (res)) { |
string ext; |
if(stringp(sub_id->extension)) { |
sub_id->not_query += sub_id->extension; |
ext = lower_case(Roxen.extension(sub_id->not_query, sub_id)); |
} |
array(string) tmp=sub_id->conf->type_from_filename(sub_id->not_query, 1, ext); |
if(tmp) |
res = ([ "file":res, "type":tmp[0], "encoding":tmp[1] ]); |
else |
res = (["file": res]); |
} |
response_headers = sub_id->make_response_headers (res); |
destruct (sub_id); |
} |
|
return response_headers; |
} |
} |
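//! Returns the PropertySet for the resource at path, a status mapping
//! on error, or 0 if there is no such file or directory. The default
//! implementation builds a DefaultPropertySet from stat_file().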
PropertySet|mapping(string:mixed) query_property_set(string path, RequestID id) |
{ |
SIMPLE_TRACE_ENTER (this, "Querying properties on %O", path); |
Stat st = stat_file(path, id); |
|
if (!st) { |
SIMPLE_TRACE_LEAVE ("No such file or dir"); |
return 0; |
} |
|
PropertySet res = DefaultPropertySet(path, query_location()+path, id, st); |
SIMPLE_TRACE_LEAVE (""); |
return res; |
} |
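//! Returns the value of the property prop_name on path, or a status
//! mapping (e.g. 404) if the resource or the property doesn't exist.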
string|array(Parser.XML.Tree.SimpleNode)|mapping(string:mixed) |
query_property(string path, string prop_name, RequestID id) |
{ |
mapping(string:mixed)|PropertySet properties = query_property_set(path, id); |
if (!properties) { |
return Roxen.http_status(Protocols.HTTP.HTTP_NOT_FOUND, |
"No such file or directory."); |
} |
if (mappingp (properties)) |
return properties; |
return properties->query_property(prop_name) || |
Roxen.http_status(Protocols.HTTP.HTTP_NOT_FOUND, "No such property."); |
} |
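//! Finds properties for path and, down to depth levels, its contents.
//! mode is one of "DAV:propname", "DAV:allprop" and "DAV:prop" (the
//! last optionally restricted by filt). The results are collected in
//! the request's multi status.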
mapping(string:mixed) recurse_find_properties(string path, string mode, |
int depth, RequestID id, |
multiset(string)|void filt) |
{ |
string prefix = map(query_location()[1..]/"/", Roxen.http_encode_url)*"/"; |
MultiStatus.Prefixed result = |
id->get_multi_status()->prefix (id->url_base() + prefix); |
|
mapping(string:mixed) recurse (string path, int depth) { |
SIMPLE_TRACE_ENTER (this, "%s for %O, depth %d", |
mode == "DAV:propname" ? "Listing property names" : |
mode == "DAV:allprop" ? "Retrieving all properties" : |
mode == "DAV:prop" ? "Retrieving specific properties" : |
"Finding properties with mode " + mode, |
path, depth); |
mapping(string:mixed)|PropertySet properties = query_property_set(path, id); |
|
if (!properties) { |
SIMPLE_TRACE_LEAVE ("No such file or dir"); |
return 0; |
} |
|
{ |
mapping(string:mixed) ret = mappingp (properties) ? |
properties : properties->find_properties(mode, result, filt); |
|
if (ret) { |
SIMPLE_TRACE_LEAVE ("Got status %d: %O", ret->error, ret->rettext); |
return ret; |
} |
} |
|
if (properties->get_stat()->isdir) { |
if (depth <= 0) { |
SIMPLE_TRACE_LEAVE ("Not recursing due to depth limit"); |
return ([]); |
} |
depth--; |
foreach(find_dir(path, id) || ({}), string filename) { |
filename = combine_path_unix(path, filename); |
if (mapping(string:mixed) sub_res = recurse(filename, depth)) |
if (sizeof (sub_res)) |
result->add_status (filename, sub_res->error, sub_res->rettext); |
} |
} |
|
SIMPLE_TRACE_LEAVE (""); |
return ([]); |
}; |
|
return recurse (path, depth); |
} |
|
mapping(string:mixed) patch_properties(string path, |
array(PatchPropertyCommand) instructions, |
RequestID id) |
{ |
SIMPLE_TRACE_ENTER (this, "Patching properties for %O", path); |
mapping(string:mixed)|PropertySet properties = query_property_set(path, id); |
|
if (!properties) { |
SIMPLE_TRACE_LEAVE ("No such file or dir"); |
return 0; |
} |
if (mappingp (properties)) { |
SIMPLE_TRACE_LEAVE ("Got error %d from query_property_set: %O", |
properties->error, properties->rettext); |
return properties; |
} |
|
mapping(string:mixed) errcode; |
|
if (errcode = write_access(path, 0, id)) { |
SIMPLE_TRACE_LEAVE("Patching denied by write_access()."); |
return errcode; |
} |
|
if (errcode = properties->start()) { |
SIMPLE_TRACE_LEAVE ("Got error %d from PropertySet.start: %O", |
errcode->error, errcode->rettext); |
return errcode; |
} |
|
array(mapping(string:mixed)) results; |
|
mixed err = catch { |
results = instructions->execute(properties); |
}; |
if (err) { |
properties->unroll(); |
throw (err); |
} else { |
string prefix = map((query_location()[1..] + path)/"/", |
Roxen.http_encode_url)*"/"; |
MultiStatus.Prefixed result = |
id->get_multi_status()->prefix (id->url_base() + prefix); |
int any_failed; |
foreach(results, mapping(string:mixed) answer) { |
if (any_failed = (answer && (answer->error >= 300))) { |
break; |
} |
} |
if (any_failed) { |
|
int i; |
mapping(string:mixed) answer = |
Roxen.http_status (Protocols.HTTP.DAV_FAILED_DEP); |
for(i = 0; i < sizeof(results); i++) { |
if (!results[i] || results[i]->error < 300) { |
result->add_property("", instructions[i]->property_name, |
answer); |
} else { |
result->add_property("", instructions[i]->property_name, |
results[i]); |
} |
} |
properties->unroll(); |
} else { |
int i; |
for(i = 0; i < sizeof(results); i++) { |
result->add_property("", instructions[i]->property_name, |
results[i]); |
} |
properties->commit(); |
} |
} |
|
SIMPLE_TRACE_LEAVE (""); |
return 0; |
} |
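//! Sets the property prop_name on path to value. Returns 0 on success
//! or a status mapping on failure.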
mapping(string:mixed) set_property (string path, string prop_name, |
string|array(Parser.XML.Tree.SimpleNode) value, |
RequestID id) |
{ |
mapping(string:mixed)|PropertySet properties = query_property_set(path, id); |
if (!properties) return Roxen.http_status(Protocols.HTTP.HTTP_NOT_FOUND, |
"File not found."); |
if (mappingp (properties)) return properties; |
|
mapping(string:mixed) result = properties->start(); |
if (result) return result; |
|
result = properties->set_property(prop_name, value); |
if (result && result->error >= 300) { |
properties->unroll(); |
return result; |
} |
|
properties->commit(); |
return 0; |
} |
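//! Removes the property prop_name from path. Returns 0 on success or
//! a status mapping on failure.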
mapping(string:mixed) remove_property (string path, string prop_name, |
RequestID id) |
{ |
mapping(string:mixed)|PropertySet properties = query_property_set(path, id); |
if (!properties) return Roxen.http_status(Protocols.HTTP.HTTP_NOT_FOUND, |
"File not found."); |
if (mappingp (properties)) return properties; |
|
mapping(string:mixed) result = properties->start(); |
if (result) return result; |
|
result = properties->remove_property(prop_name); |
if (result && result->error >= 300) { |
properties->unroll(); |
return result; |
} |
|
properties->commit(); |
return 0; |
} |
|
string resource_id (string path, RequestID|int(0..0) id) |
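//! Returns the canonicalized resource id that is used as key in the
//! lock tables for path. The default implementation only ensures a
//! trailing "/".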
{ |
return has_suffix (path, "/") ? path : path + "/"; |
} |
|
string|int authenticated_user_id (string path, RequestID id) |
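//! Returns a value identifying the user that lock operations in id
//! are made for. The default implementation returns the name of the
//! user authenticated by the configuration, or 0 if there is none.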
{ |
|
User uid = my_configuration()->authenticate (id); |
return uid && uid->name(); |
} |
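//! Non-recursive locks, indexed on resource id and then on the value
//! returned by authenticated_user_id().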
protected mapping(string:mapping(mixed:DAVLock)) file_locks = ([]); |
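//! Recursive locks, indexed like file_locks. A lock stored here
//! applies to every resource id that has the index as prefix.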
protected mapping(string:mapping(mixed:DAVLock)) prefix_locks = ([]); |
|
#define LOOP_OVER_BOTH(PATH, LOCKS, CODE) \ |
do { \ |
foreach (file_locks; PATH; LOCKS) {CODE;} \ |
foreach (prefix_locks; PATH; LOCKS) {CODE;} \ |
} while (0) |
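//! Returns the set of locks that apply to path, or 0 if there are
//! none. With recursive >= 0, locks on parent collections that cover
//! path are included; with recursive != 0, locks on resources below
//! path are included too. If exclude_shared is set, shared locks held
//! by other users are left out.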
multiset(DAVLock) find_locks(string path, |
int(-1..1) recursive, |
int(0..1) exclude_shared, |
RequestID id) |
{ |
|
if (!sizeof(file_locks) && !sizeof(prefix_locks)) return 0; |
|
TRACE_ENTER(sprintf("find_locks(%O, %O, %O, X)", |
path, recursive, exclude_shared), this); |
|
string rsc = resource_id (path, id); |
|
multiset(DAVLock) locks = (<>); |
function(mapping(mixed:DAVLock):void) add_locks; |
|
if (exclude_shared) { |
mixed auth_user = authenticated_user_id (path, id); |
add_locks = lambda (mapping(mixed:DAVLock) sub_locks) { |
foreach (sub_locks; string user; DAVLock lock) |
if (user == auth_user || |
lock->lockscope == "DAV:exclusive") |
locks[lock] = 1; |
}; |
} |
else |
add_locks = lambda (mapping(mixed:DAVLock) sub_locks) { |
locks |= mkmultiset (values (sub_locks)); |
}; |
|
if (file_locks[rsc]) { |
add_locks (file_locks[rsc]); |
} |
|
if (recursive >= 0) { |
foreach(prefix_locks; |
string prefix; mapping(mixed:DAVLock) sub_locks) { |
if (has_prefix(rsc, prefix)) { |
add_locks (sub_locks); |
break; |
} |
} |
} |
|
if (recursive) { |
LOOP_OVER_BOTH (string prefix, mapping(mixed:DAVLock) sub_locks, { |
if (has_prefix(prefix, rsc)) { |
add_locks (sub_locks); |
} |
}); |
} |
|
add_locks = 0; |
|
TRACE_LEAVE(sprintf("Done, found %d locks.", sizeof(locks))); |
|
return sizeof(locks) && locks; |
} |
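//! Checks how path is locked with respect to the user that issued the
//! request in id. Returns the user's own DAVLock if one applies to
//! path, a LockFlag (LOCK_EXCL_AT, LOCK_SHARED_AT and, when recursive
//! is set, LOCK_OWN_BELOW, LOCK_EXCL_BELOW or LOCK_SHARED_BELOW)
//! describing locks held by others, or 0 if no locks apply.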
DAVLock|LockFlag check_locks(string path, |
int(0..1) recursive, |
RequestID id) |
{ |
TRACE_ENTER(sprintf("check_locks(%O, %d, X)", path, recursive), this); |
|
|
if (!sizeof(file_locks) && !sizeof(prefix_locks)) { |
TRACE_LEAVE ("Got no locks"); |
return 0; |
} |
|
mixed auth_user = authenticated_user_id (path, id); |
path = resource_id (path, id); |
|
if (DAVLock lock = |
file_locks[path] && file_locks[path][auth_user] || |
prefix_locks[path] && prefix_locks[path][auth_user]) { |
TRACE_LEAVE(sprintf("Found own lock %O.", lock->locktoken)); |
return lock; |
} |
|
LockFlag shared; |
|
if (mapping(mixed:DAVLock) locks = file_locks[path]) { |
foreach(locks;; DAVLock lock) { |
if (lock->lockscope == "DAV:exclusive") { |
TRACE_LEAVE(sprintf("Found other user's exclusive lock %O.", |
lock->locktoken)); |
return LOCK_EXCL_AT; |
} |
shared = LOCK_SHARED_AT; |
break; |
} |
} |
|
foreach(prefix_locks; |
string prefix; mapping(mixed:DAVLock) locks) { |
if (has_prefix(path, prefix)) { |
if (DAVLock lock = locks[auth_user]) { |
SIMPLE_TRACE_LEAVE ("Found own lock %O on %O.", lock->locktoken, prefix); |
return lock; |
} |
if (!shared) |
|
|
foreach(locks;; DAVLock lock) { |
if (lock->lockscope == "DAV:exclusive") { |
TRACE_LEAVE(sprintf("Found other user's exclusive lock %O.", |
lock->locktoken)); |
return LOCK_EXCL_AT; |
} |
shared = LOCK_SHARED_AT; |
break; |
} |
} |
} |
|
if (!recursive) { |
SIMPLE_TRACE_LEAVE("Returning %O.", shared); |
return shared; |
} |
|
int(0..1) locked_by_auth_user; |
|
|
|
LOOP_OVER_BOTH (string prefix, mapping(mixed:DAVLock) locks, { |
if (has_prefix(prefix, path)) { |
if (locks[auth_user]) |
locked_by_auth_user = 1; |
else |
foreach(locks;; DAVLock lock) { |
if (lock->lockscope == "DAV:exclusive") { |
TRACE_LEAVE(sprintf("Found other user's exclusive lock %O.", |
lock->locktoken)); |
return LOCK_EXCL_BELOW; |
} |
if (!shared) shared = LOCK_SHARED_BELOW; |
break; |
} |
} |
}); |
|
SIMPLE_TRACE_LEAVE("Returning %O.", locked_by_auth_user ? LOCK_OWN_BELOW : shared); |
return locked_by_auth_user ? LOCK_OWN_BELOW : shared; |
} |
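//! Registers lock on path for the current user in the lock tables:
//! prefix_locks if the lock is recursive, file_locks otherwise.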
protected void register_lock(string path, DAVLock lock, RequestID id) |
{ |
TRACE_ENTER(sprintf("register_lock(%O, lock(%O), X).", path, lock->locktoken), |
this); |
ASSERT_IF_DEBUG (lock->locktype == "DAV:write"); |
mixed auth_user = authenticated_user_id (path, id); |
path = resource_id (path, id); |
if (lock->recursive) { |
if (prefix_locks[path]) { |
prefix_locks[path][auth_user] = lock; |
} else { |
prefix_locks[path] = ([ auth_user:lock ]); |
} |
} else { |
if (file_locks[path]) { |
file_locks[path][auth_user] = lock; |
} else { |
file_locks[path] = ([ auth_user:lock ]); |
} |
} |
TRACE_LEAVE("Ok."); |
} |
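//! Removes lock on path from the lock tables. If id is zero the lock
//! is looked up by identity instead of by authenticated user.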
protected void unregister_lock (string path, DAVLock lock, |
RequestID|int(0..0) id) |
{ |
TRACE_ENTER(sprintf("unregister_lock(%O, lock(%O), X).", path, lock->locktoken), |
this); |
mixed auth_user = id && authenticated_user_id (path, id); |
path = resource_id (path, id); |
DAVLock removed_lock; |
if (lock->recursive) { |
if (id) { |
removed_lock = m_delete(prefix_locks[path], auth_user); |
} else { |
foreach(prefix_locks[path]; mixed user; DAVLock l) { |
if (l == lock) { |
removed_lock = m_delete(prefix_locks[path], user); |
} |
} |
} |
if (!sizeof (prefix_locks[path])) m_delete (prefix_locks, path); |
} |
else if (file_locks[path]) { |
if (id) { |
removed_lock = m_delete (file_locks[path], auth_user); |
} else { |
foreach(file_locks[path]; mixed user; DAVLock l) { |
if (l == lock) { |
removed_lock = m_delete(file_locks[path], user); |
} |
} |
} |
if (!sizeof (file_locks[path])) m_delete (file_locks, path); |
} |
  ASSERT_IF_DEBUG (lock == removed_lock, lock, removed_lock);
  TRACE_LEAVE("Ok.");
} |
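//! Called when a new lock is being established on path. The default
//! implementation accepts the lock by returning 0; modules may
//! override it to record locks or refuse them.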
mapping(string:mixed) lock_file(string path, |
DAVLock lock, |
RequestID id) |
{ |
return 0; |
} |
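//! Called when lock on path is being removed. Note that this is only
//! a prototype; modules that handle locking need to implement it.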
mapping(string:mixed) unlock_file (string path, |
DAVLock lock, |
RequestID|int(0..0) id); |
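//! Checks the locks and the HTTP "If" header for relative_path.
//! Returns 0 or 1 if the request may proceed (1 means there are locks
//! further down that have to be checked for each resource), or a
//! status mapping (423 or 412) if it may not.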
mapping(string:mixed)|int(0..1) check_if_header(string relative_path, |
int(0..1) recursive, |
RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "Checking \"If\" header for %O", |
relative_path); |
|
int|DAVLock lock = check_locks(relative_path, recursive, id); |
|
int(0..1) got_sublocks; |
if (lock && intp(lock)) { |
if (lock & 1) { |
TRACE_LEAVE("Locked by other user."); |
return Roxen.http_status(Protocols.HTTP.DAV_LOCKED); |
} |
else if (recursive) |
|
|
|
got_sublocks = 1; |
} |
|
string path = relative_path; |
if (!has_suffix (path, "/")) path += "/"; |
path = query_location() + path; |
|
mapping(string:array(array(array(string)))) if_data = id->get_if_data(); |
array(array(array(string))) condition; |
if (!if_data || !sizeof(condition = if_data[path] || if_data[0])) { |
if (lock) { |
TRACE_LEAVE("Locked, no if header."); |
return Roxen.http_status(Protocols.HTTP.DAV_LOCKED); |
} |
SIMPLE_TRACE_LEAVE("No lock and no if header - ok%s.", |
got_sublocks ? " (this level only)" : ""); |
return got_sublocks; |
} |
|
string|int(-1..0) etag; |
|
int(0..1) locked_fail = !!lock; |
next_condition: |
foreach(condition, array(array(string)) sub_cond) { |
SIMPLE_TRACE_ENTER(this, |
"Trying condition ( %{%s:%O %})...", sub_cond); |
int negate; |
DAVLock locked = lock; |
foreach(sub_cond, array(string) token) { |
switch(token[0]) { |
case "not": |
negate = !negate; |
break; |
case "etag": |
if (!etag) { |
|
|
if (!stringp(etag = query_property(relative_path, |
"DAV:getetag", id))) { |
etag = -1; |
} |
} |
if (etag != token[1]) { |
|
if (!negate) { |
TRACE_LEAVE("Etag mismatch."); |
continue next_condition; |
} |
} else if (negate) { |
|
TRACE_LEAVE("Matched negated etag."); |
continue next_condition; |
} |
negate = 0; |
break; |
case "key": |
|
locked_fail = 0; |
if (negate) { |
if (lock && lock->locktoken == token[1]) { |
TRACE_LEAVE("Matched negated lock."); |
continue next_condition; |
} |
} else if (!lock || lock->locktoken != token[1]) { |
|
TRACE_LEAVE("Lock mismatch."); |
continue next_condition; |
} else { |
locked = 0; |
} |
negate = 0; |
break; |
} |
} |
if (!locked) { |
TRACE_LEAVE("Found match."); |
SIMPLE_TRACE_LEAVE("Ok%s.", |
got_sublocks ? " (this level only)" : ""); |
return got_sublocks; |
} |
SIMPLE_TRACE_LEAVE("Conditional ok, but still locked (locktoken: %O).", |
lock->locktoken); |
locked_fail = 1; |
} |
|
if (locked_fail) { |
TRACE_LEAVE("Failed (locked)."); |
} else { |
TRACE_LEAVE("Precondition failed."); |
} |
return Roxen.http_status(locked_fail ? |
Protocols.HTTP.DAV_LOCKED : |
Protocols.HTTP.HTTP_PRECOND_FAILED); |
} |
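//! Checks whether the current request is allowed to modify
//! relative_path. The default implementation checks locks and the
//! "If" header via check_if_header().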
protected mapping(string:mixed)|int(0..1) write_access(string relative_path, |
int(0..1) recursive, |
RequestID id) |
{ |
return check_if_header (relative_path, recursive, id); |
} |
|
mapping(string:mixed)|int(-1..0)|Stdio.File find_file(string path, |
RequestID id); |
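//! Deletes a single file or empty collection by sending an internal
//! DELETE request through find_file(), and returns its result.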
protected mapping(string:mixed) delete_file(string path, RequestID id) |
{ |
|
RequestID tmp_id = id->clone_me(); |
tmp_id->not_query = query_location() + path; |
tmp_id->method = "DELETE"; |
|
return find_file(path, tmp_id) || |
tmp_id->misc->error_code && Roxen.http_status (tmp_id->misc->error_code); |
} |
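//! Deletes path recursively. Errors for individual entries are
//! reported in stat (or the request's multi status); a collection is
//! only deleted if all of its contents could be deleted. Returns 0 if
//! path doesn't exist, otherwise a status mapping (an empty mapping
//! means that the errors are in the multi status).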
mapping(string:mixed) recurse_delete_files(string path, |
RequestID id, |
void|MultiStatus.Prefixed stat) |
{ |
SIMPLE_TRACE_ENTER (this, "Deleting %O recursively", path); |
if (!stat) |
stat = id->get_multi_status()->prefix (id->url_base() + |
query_location()[1..]); |
|
Stat st = stat_file(path, id); |
if (!st) { |
SIMPLE_TRACE_LEAVE ("No such file or directory"); |
return 0; |
} |
|
mapping(string:mixed) recurse (string path, Stat st) |
{ |
|
|
if (st->isdir) { |
|
|
|
int fail; |
if (!has_suffix(path, "/")) path += "/"; |
foreach(find_dir(path, id) || ({}), string fname) { |
fname = path + fname; |
if (Stat sub_stat = stat_file (fname, id)) { |
SIMPLE_TRACE_ENTER (this, "Deleting %O", fname); |
if (mapping(string:mixed) sub_res = recurse(fname, sub_stat)) { |
|
|
|
|
if (sizeof (sub_res) && sub_res->error != 204) { |
stat->add_status(fname, sub_res->error, sub_res->rettext); |
} |
if (!sizeof (sub_res) || sub_res->error >= 300) fail = 1; |
} |
} |
} |
if (fail) { |
SIMPLE_TRACE_LEAVE ("Partial failure"); |
return ([]); |
} |
} |
|
SIMPLE_TRACE_LEAVE (""); |
return delete_file (path, id); |
}; |
|
return recurse(path, st) || Roxen.http_status(204); |
} |
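//! Creates a collection (directory) at path by sending an internal
//! MKCOL request through find_file().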
mapping(string:mixed) make_collection(string path, RequestID id) |
{ |
|
RequestID tmp_id = id->clone_me(); |
tmp_id->not_query = query_location() + path; |
tmp_id->method = "MKCOL"; |
|
return find_file(path, tmp_id); |
} |
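//! Copies the properties of source to destination as specified by
//! behavior. Properties that already have the right value, or that
//! turn out to be read-only, are skipped. Returns 0 on success or a
//! status mapping on failure.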
protected mapping(string:mixed) copy_properties( |
string source, string destination, PropertyBehavior behavior, RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "copy_properties(%O, %O, %O, %O)", |
source, destination, behavior, id); |
PropertySet source_properties = query_property_set(source, id); |
PropertySet destination_properties = query_property_set(destination, id); |
|
multiset(string) property_set = source_properties->query_all_properties(); |
mapping(string:mixed) res; |
foreach(property_set; string property_name;) { |
SIMPLE_TRACE_ENTER(this, "Copying the property %O.", property_name); |
string|array(Parser.XML.Tree.SimpleNode)|mapping(string:mixed) source_val = |
source_properties->query_property(property_name); |
if (mappingp(source_val)) { |
TRACE_LEAVE("Reading of property failed. Skipped."); |
continue; |
} |
string|array(Parser.XML.Tree.SimpleNode)|mapping(string:mixed) dest_val = |
destination_properties->query_property(property_name); |
if (dest_val == source_val) { |
TRACE_LEAVE("Destination already has the correct value."); |
continue; |
} |
mapping(string:mixed) subres = |
destination_properties->set_property(property_name, source_val); |
if (!behavior) { |
TRACE_LEAVE("Omit verify."); |
continue; |
} |
if ((intp(behavior) || behavior[property_name]) && (subres->error < 300)) { |
|
|
|
} |
if ((subres->error < 300) || |
(subres->error == Protocols.HTTP.HTTP_CONFLICT)) { |
|
TRACE_LEAVE("Copy ok or read-only property."); |
continue; |
} |
if (!subres) { |
|
res = Roxen.http_status(Protocols.HTTP.HTTP_PRECOND_FAILED); |
} |
TRACE_LEAVE("Copy failed."); |
} |
TRACE_LEAVE(res?"Failed.":"Ok."); |
return res; |
} |
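//! Copies or overlays the collection source to destination (without
//! its contents), honoring overwrite, and then copies its properties.
//! Errors for individual entries are reported in result.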
protected mapping(string:mixed) copy_collection( |
string source, string destination, PropertyBehavior behavior, |
Overwrite overwrite, MultiStatus.Prefixed result, RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "copy_collection(%O, %O, %O, %O, %O, %O).", |
source, destination, behavior, overwrite, result, id); |
Stat st = stat_file(destination, id); |
if (st) { |
|
switch(overwrite) { |
case DO_OVERWRITE: |
|
|
|
|
|
TRACE_ENTER("Destination exists and overwrite is on.", this); |
mapping(string:mixed) res = recurse_delete_files(destination, id, result); |
if (res && (!sizeof (res) || res->error >= 300)) { |
|
TRACE_LEAVE("Deletion failed."); |
TRACE_LEAVE("Copy collection failed."); |
|
#if 0 |
return Roxen.http_status(Protocols.HTTP.HTTP_PRECOND_FAILED); |
#else |
if (sizeof (res)) { |
result->add_status (destination, res->error, res->rettext); |
} |
return ([]); |
#endif |
} |
TRACE_LEAVE("Deletion ok."); |
break; |
case NEVER_OVERWRITE: |
TRACE_LEAVE("Destination already exists."); |
return Roxen.http_status(Protocols.HTTP.HTTP_PRECOND_FAILED); |
case MAYBE_OVERWRITE: |
|
|
if (st->isdir) { |
TRACE_LEAVE("Destination exists and is a directory."); |
return copy_properties(source, destination, behavior, id); |
} |
TRACE_LEAVE("Destination exists and is not a directory."); |
return Roxen.http_status(Protocols.HTTP.HTTP_PRECOND_FAILED); |
} |
} |
|
TRACE_LEAVE("Make a new collection."); |
mapping(string:mixed) res = make_collection(destination, id); |
if (res && res->error >= 300) return res; |
return copy_properties(source, destination, behavior, id) || res; |
} |
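//! Copies the single file source to destination. The default
//! implementation just returns 501 Not Implemented; filesystem
//! modules are expected to override it.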
protected mapping(string:mixed) copy_file(string source, string destination, |
PropertyBehavior behavior, |
Overwrite overwrite, RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "copy_file(%O, %O, %O, %O, %O)\n", |
source, destination, behavior, overwrite, id); |
TRACE_LEAVE("Not implemented."); |
return Roxen.http_status (Protocols.HTTP.HTTP_NOT_IMPL); |
} |
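//! Copies source to destination recursively, using copy_collection()
//! and copy_file(). The operation is refused if source and
//! destination overlap. Errors for individual entries end up in the
//! request's multi status.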
mapping(string:mixed) recurse_copy_files(string source, string destination, |
PropertyBehavior behavior, |
Overwrite overwrite, RequestID id) |
{ |
SIMPLE_TRACE_ENTER(this, "Recursive copy from %O to %O (%s)", |
source, destination, |
overwrite == DO_OVERWRITE ? "replace" : |
overwrite == NEVER_OVERWRITE ? "no overwrite" : |
"overlay"); |
string src_tmp = has_suffix(source, "/")?source:(source+"/"); |
string dst_tmp = has_suffix(destination, "/")?destination:(destination+"/"); |
if ((src_tmp == dst_tmp) || |
has_prefix(src_tmp, dst_tmp) || |
has_prefix(dst_tmp, src_tmp)) { |
TRACE_LEAVE("Source and destination overlap."); |
return Roxen.http_status(403, "Source and destination overlap."); |
} |
|
string prefix = map(query_location()[1..]/"/", Roxen.http_encode_url)*"/"; |
MultiStatus.Prefixed result = |
id->get_multi_status()->prefix (id->url_base() + prefix); |
|
mapping(string:mixed) recurse(string source, string destination) { |
|
|
Stat st = stat_file(source, id); |
if (!st) { |
TRACE_LEAVE("Source not found."); |
return 0; |
} |
|
if (st->isdir) { |
mapping(string:mixed) res = |
copy_collection(source, destination, behavior, overwrite, result, id); |
if (res && (!sizeof (res) || res->error >= 300)) { |
|
TRACE_LEAVE("Copy of collection failed."); |
return res; |
} |
foreach(find_dir(source, id), string filename) { |
string subsrc = combine_path_unix(source, filename); |
string subdst = combine_path_unix(destination, filename); |
SIMPLE_TRACE_ENTER(this, "Copy from %O to %O\n", subsrc, subdst); |
mapping(string:mixed) sub_res = recurse(subsrc, subdst); |
if (sub_res && !(<0, 201, 204>)[sub_res->error]) { |
result->add_status(subdst, sub_res->error, sub_res->rettext); |
} |
} |
TRACE_LEAVE(""); |
return res; |
} else { |
TRACE_LEAVE(""); |
return copy_file(source, destination, behavior, overwrite, id); |
} |
}; |
|
int start_ms_size = id->multi_status_size(); |
mapping(string:mixed) res = recurse (source, destination); |
if (res && res->error != 204 && res->error != 201) |
return res; |
else if (id->multi_status_size() != start_ms_size) |
return ([]); |
else |
return res; |
} |
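//! Moves the single file source to destination. An internal MOVE
//! request is tried through find_file() first; if that is not
//! implemented (501), the move is done as copy_file() followed by
//! delete_file().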
protected mapping(string:mixed) move_file(string source, string destination, |
PropertyBehavior behavior, |
Overwrite overwrite, RequestID id) |
{ |
|
RequestID tmp_id = id->clone_me(); |
tmp_id->not_query = query_location() + source; |
tmp_id->misc["new-uri"] = query_location() + destination; |
tmp_id->request_headers->destination = |
id->url_base() + query_location()[1..] + destination; |
tmp_id->method = "MOVE"; |
mapping(string:mixed) res = find_file(source, tmp_id); |
if (!res || res->error != 501) return res; |
|
res = copy_file(source, destination, behavior, overwrite, id); |
if (res && res->error >= 300) { |
|
return res; |
} |
return delete_file(source, id); |
} |
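//! Moves the collection source to destination. An internal MOVE
//! request is tried through find_file() first; if that is not
//! implemented (501), the collection and its contents are copied and
//! the source is then deleted.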
protected mapping(string:mixed) move_collection( |
string source, string destination, PropertyBehavior behavior, |
Overwrite overwrite, RequestID id) |
{ |
|
RequestID tmp_id = id->clone_me(); |
tmp_id->not_query = query_location() + source; |
tmp_id->misc["new-uri"] = query_location() + destination; |
tmp_id->request_headers->destination = |
id->url_base() + query_location()[1..] + destination; |
tmp_id->method = "MOVE"; |
mapping(string:mixed) res = find_file(source, tmp_id); |
if (!res || res->error != 501) return res; |
|
string prefix = map(query_location()[1..]/"/", Roxen.http_encode_url)*"/"; |
MultiStatus.Prefixed result = |
id->get_multi_status()->prefix (id->url_base() + prefix); |
res = copy_collection(source, destination, behavior, overwrite, result, id); |
if (res && (res->error >= 300 || !sizeof(res))) { |
|
return res; |
} |
int fail; |
foreach(find_dir(source, id), string filename) { |
string subsrc = combine_path_unix(source, filename); |
string subdst = combine_path_unix(destination, filename); |
SIMPLE_TRACE_ENTER(this, "Recursive move from %O to %O\n", |
subsrc, subdst); |
if (mapping(string:mixed) sub_res = |
recurse_move_files(subsrc, subdst, behavior, overwrite, id)) { |
if (!(<0, 201, 204>)[sub_res->error]) { |
result->add_status(subdst, sub_res->error, sub_res->rettext); |
} |
if (!sizeof (sub_res) || sub_res->error >= 300) { |
|
fail = 1; |
} |
} |
} |
if (fail) return ([]); |
return delete_file(source, id); |
} |
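//! Moves source to destination, recursively for collections. Returns
//! 0 if source doesn't exist.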
mapping(string:mixed) recurse_move_files(string source, string destination, |
PropertyBehavior behavior, |
Overwrite overwrite, RequestID id) |
{ |
Stat st = stat_file(source, id); |
if (!st) return 0; |
|
if (st->isdir) { |
return move_collection(source, destination, behavior, overwrite, id); |
} |
return move_file(source, destination, behavior, overwrite, id); |
} |
|
string real_file(string f, RequestID id){} |
|
void add_api_function( string name, function f, void|array(string) types) |
{ |
_api_functions[name] = ({ f, types }); |
} |
|
mapping api_functions() |
{ |
return _api_functions; |
} |
|
#if ROXEN_COMPAT <= 1.4 |
mapping(string:function) query_tag_callers() |
|
{ |
mapping(string:function) m = ([]); |
foreach(glob("tag_*", indices( this_object())), string q) |
if(functionp( this_object()[q] )) |
m[replace(q[4..], "_", "-")] = this_object()[q]; |
return m; |
} |
|
mapping(string:function) query_container_callers() |
|
{ |
mapping(string:function) m = ([]); |
foreach(glob("container_*", indices( this_object())), string q) |
if(functionp( this_object()[q] )) |
m[replace(q[10..], "_", "-")] = this_object()[q]; |
return m; |
} |
#endif |
|
mapping(string:array(int|function)) query_simpletag_callers() |
{ |
mapping(string:array(int|function)) m = ([]); |
foreach(glob("simpletag_*", indices(this_object())), string q) |
if(functionp(this_object()[q])) |
m[replace(q[10..],"_","-")] = |
({ intp (this_object()[q + "_flags"]) && this_object()[q + "_flags"], |
this_object()[q] }); |
return m; |
} |
|
mapping(string:array(int|function)) query_simple_pi_tag_callers() |
{ |
mapping(string:array(int|function)) m = ([]); |
foreach (glob ("simple_pi_tag_*", indices (this_object())), string q) |
if (functionp (this_object()[q])) |
m[replace (q[sizeof ("simple_pi_tag_")..], "_", "-")] = |
({(intp (this_object()[q + "_flags"]) && this_object()[q + "_flags"]) | |
RXML.FLAG_PROC_INSTR, this_object()[q]}); |
return m; |
} |
|
RXML.TagSet query_tag_set() |
{ |
if (!module_tag_set) { |
array(function|program|object) tags = |
filter (rows (this_object(), |
glob ("Tag*", indices (this_object()))), |
lambda(mixed x) { return functionp(x)||programp(x); }); |
for (int i = 0; i < sizeof (tags); i++) |
if (programp (tags[i])) |
if (!tags[i]->is_RXML_Tag) tags[i] = 0; |
else tags[i] = tags[i](); |
else { |
tags[i] = tags[i](); |
|
if (!tags[i]->is_RXML_Tag) tags[i] = 0; |
} |
tags -= ({0}); |
module_tag_set = |
(this_object()->ModuleTagSet || RXML.TagSet) (this_object(), "", tags); |
} |
return module_tag_set; |
} |
|
mixed get_value_from_file(string path, string index, void|string pre) |
{ |
Stdio.File file=Stdio.File(); |
if(!file->open(path,"r")) return 0; |
if(has_suffix(index, "()")) |
index = index[..sizeof(index) - 3]; |
|
|
|
return compile_string((pre || "") + file->read(), path)[index]; |
} |
|
#if constant(roxen.FSGarbWrapper) |
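//! Registers a filesystem garbage collector for path on behalf of
//! this module, with limits max_age, max_size and max_files; see
//! roxen.register_fsgarb() for the details.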
roxen.FSGarbWrapper register_fsgarb(string path, int max_age, |
int|void max_size, int|void max_files, |
string|void quarantine) |
{ |
return roxen.register_fsgarb(module_identifier(), path, max_age, |
max_size, max_files, quarantine); |
} |
#endif |
|
private mapping __my_tables = ([]); |
|
array(mapping(string:mixed)) sql_query( string query, mixed ... args ) |
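//! Runs query against this module's database (see set_my_db()).
//! Table references registered by get_my_table() (the table name
//! wrapped in an ampersand and a semicolon) are replaced with the
//! real table names before the query is sent.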
{ |
return get_my_sql()->query( replace( query, __my_tables ), @args ); |
} |
|
object sql_big_query( string query, mixed ... args ) |
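//! Like sql_query(), but returns the big_query() result object.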
{ |
return get_my_sql()->big_query( replace( query, __my_tables ), @args ); |
} |
|
array(mapping(string:mixed)) sql_query_ro( string query, mixed ... args ) |
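//! Like sql_query(), but uses a read-only database connection.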
{ |
return get_my_sql(1)->query( replace( query, __my_tables ), @args ); |
} |
|
object sql_big_query_ro( string query, mixed ... args ) |
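//! Like sql_big_query(), but uses a read-only database connection.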
{ |
return get_my_sql(1)->big_query( replace( query, __my_tables ), @args ); |
} |
|
protected int create_sql_tables( mapping(string:array(string)) definitions, |
string|void comment, int|void no_unique_names ) |
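//! Creates the tables given in definitions (a mapping from table name
//! to column definitions) in the module's database and registers them
//! with the DBManager. Unless no_unique_names is set, the names are
//! made unique per module instance via get_my_table(). Returns the
//! number of tables that were actually created.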
{ |
int ddc; |
if( !no_unique_names ) |
foreach( indices( definitions ), string t ) |
ddc+=get_my_table( t, definitions[t], comment, 1 ); |
else |
{ |
Sql.Sql sql = get_my_sql(); |
foreach( indices( definitions ), string t ) |
{ |
if( !catch { |
sql->query("CREATE TABLE "+t+" ("+definitions[t]*","+")" ); |
} ) |
ddc++; |
DBManager.is_module_table( this_object(), my_db, t, comment ); |
} |
} |
return ddc; |
} |
|
protected string sql_table_exists( string name ) |
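//! Returns the real (per-instance) name of the module table name if
//! it exists in the database, and 0 otherwise.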
{ |
if(strlen(name)) |
name = "_"+name; |
|
string res = hash(_my_configuration->name)->digits(36) |
+ "_" + replace(sname(), ({ "#","-" }), ({ "_","_" })) + name; |
|
return catch(get_my_sql()->query( "SELECT * FROM "+res+" LIMIT 1" ))?0:res; |
} |
|
|
protected string|int get_my_table( string|array(string) name, |
void|array(string)|string definition, |
string|void comment, int|void flag ) |
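//! Returns the name of a table for this module, creating it according
//! to definition if it doesn't exist yet. The name is unique for the
//! configuration and module instance, and queries passed to the
//! sql_query() functions can refer to it through the registered
//! shorthand (the name wrapped in an ampersand and a semicolon). If
//! flag is set, only the shorthand is registered and the return value
//! is 1 if the table was created, 0 if it already existed.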
{ |
string oname; |
int ddc; |
if( !definition ) |
{ |
definition = name; |
oname = name = ""; |
} |
else if(strlen(name)) |
name = "_"+(oname = name); |
|
Sql.Sql sql = get_my_sql(); |
|
string res = hash(_my_configuration->name)->digits(36) |
+ "_" + replace(sname(),({ "#","-" }), ({ "_","_" })) + name; |
|
if( !sql ) |
{ |
report_error("Failed to get SQL handle, permission denied for "+my_db+"\n"); |
return 0; |
} |
if( arrayp( definition ) ) |
definition *= ", "; |
|
if( catch(sql->query( "SELECT * FROM "+res+" LIMIT 1" )) ) |
{ |
ddc++; |
mixed error = |
catch |
{ |
get_my_sql()->query( "CREATE TABLE "+res+" ("+definition+")" ); |
DBManager.is_module_table( this_object(), my_db, res, |
oname+"\0"+comment ); |
}; |
if( error ) |
{ |
if( strlen( name ) ) |
name = " "+name; |
report_error( "Failed to create table"+name+": "+ |
describe_error( error ) ); |
return 0; |
} |
if( flag ) |
{ |
__my_tables[ "&"+oname+";" ] = res; |
return ddc; |
} |
return __my_tables[ "&"+oname+";" ] = res; |
} |
|
if( flag ) |
{ |
__my_tables[ "&"+oname+";" ] = res; |
return ddc; |
} |
return __my_tables[ "&"+oname+";" ] = res; |
} |
|
protected string my_db = "local"; |
|
protected void set_my_db( string to ) |
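//! Selects the database (as named in the DBManager) that the sql_*()
//! and get_my_*() functions operate on. The default is "local".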
{ |
my_db = to; |
} |
|
Sql.Sql get_my_sql( int|void read_only, void|string charset ) |
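//! Returns a connection to this module's database from the DBManager
//! cache. read_only requests a read-only connection and charset
//! selects the connection charset.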
{ |
return DBManager.cached_get( my_db, _my_configuration, read_only, charset ); |
} |
|
|
|
int|string format_db_browser_value (string db_name, string table_name, |
string column_name, array(string) col_names, |
array(string) col_types, array(string) row, |
RequestID id); |
|
|