pike.git / lib / modules / Search.pmod / Utils.pmod

version: Context lines:

pike.git/lib/modules/Search.pmod/Utils.pmod:1:   // This file is part of Roxen Search   // Copyright © 2001 Roxen IS. All rights reserved.   // - // $Id: Utils.pmod,v 1.37 2003/01/31 11:55:05 mattias Exp $ + // $Id: Utils.pmod,v 1.38 2003/08/19 14:45:45 mattias Exp $      #if !constant(report_error)   #define report_error werror   #define report_debug werror   #define report_warning werror   #endif      #ifdef SEARCH_DEBUG   # define WERR(X) report_debug("search: "+(X)+"\n");   #else
pike.git/lib/modules/Search.pmod/Utils.pmod:441:    mapping dbp = get_profile_storage(db_name);    if(scheduler_storage[db_name])    return scheduler_storage[db_name];    scheduler_storage[db_name] = Scheduler(dbp);    return scheduler_storage[db_name] = Scheduler(dbp);   }      class Scheduler {       private int next_run; +  private mapping(int:int) entry_queue;    private mapping(int:int) crawl_queue;    private mapping(int:int) compact_queue;    private mapping db_profiles;    private object schedule_process;       void create(mapping _db_profiles) {    db_profiles = _db_profiles;    schedule();    }       //! Call this method to indicate that a new entry has been added    //! to the queue. The scheduler will delay indexing with at most    //! @[latency] minutes.    void new_entry(int latency, array(int) profiles) {    int would_be_indexed = time() + latency*60;    foreach(profiles, int profile) -  crawl_queue[profile] = 0; +  entry_queue[profile] = 0;    WERR("New entry. time: "+(would_be_indexed-time())+" profiles: "+(array(string))profiles*",");    if(next_run && next_run<would_be_indexed && next_run>=time())    return;    next_run = would_be_indexed;    reschedule();    }       void schedule() { -  +  entry_queue = ([]);    crawl_queue = ([]);    compact_queue = ([]);       foreach(indices(db_profiles), int id) {    object dbp = db_profiles[id];    if(!dbp) {    report_warning("Search database profile %d destructed.\n", id);    m_delete(db_profiles, id);    continue;    }
pike.git/lib/modules/Search.pmod/Utils.pmod:491:    }    next = dbp->next_compact();    if(next != -1) {    compact_queue[dbp->id] = next;    WERR(" Compact: "+(next-time()));    }    WERR("\n");    }       if(!sizeof(crawl_queue) && !sizeof(compact_queue)) return; -  next_run = min( @values(crawl_queue)+values(compact_queue) ); +  next_run = min( @values(crawl_queue)+values(compact_queue)+values(entry_queue) );    reschedule();    }      #if constant (roxen)    private void reschedule() {    if( schedule_process )    schedule_process->stop();    WERR("Scheduler runs next event in "+(next_run-time())+" seconds.");    schedule_process =    roxen.BackgroundProcess(next_run-time(), do_scheduled_stuff);
pike.git/lib/modules/Search.pmod/Utils.pmod:523: Inside #if constant (roxen)
   WERR("Running scheduler event.");       int t = time();       WERR(sizeof(crawl_queue)+" profiles in crawl queue.");    foreach(indices(crawl_queue), int id) {    if(crawl_queue[id]>t || !db_profiles[id]) continue;    object dbp = db_profiles[id];    if(dbp && dbp->ready_to_crawl()) {    WERR("Scheduler starts crawling "+id); +  dbp->action_reindex(); +  entry_queue = ([]); +  } +  } +  +  WERR(sizeof(entry_queue)+" profiles in entry queue."); +  foreach(indices(entry_queue), int id) { +  if(entry_queue[id]>t || !db_profiles[id]) continue; +  object dbp = db_profiles[id]; +  if(dbp && dbp->ready_to_crawl()) { +  WERR("Scheduler starts crawling "+id);    dbp->start_indexer();    }    }       WERR(sizeof(compact_queue)+" profiles in compact queue.");    foreach(indices(compact_queue), int id) {    if(compact_queue[id]>t || !db_profiles[id]) continue;    db_profiles[id]->start_compact();    }   
pike.git/lib/modules/Search.pmod/Utils.pmod:559:    WERR("Running scheduler event.");       int t = time();       WERR(sizeof(crawl_queue)+" profiles in crawl queue.");    foreach(indices(crawl_queue), int id) {    if(crawl_queue[id]>t || !db_profiles[id]) continue;    object dbp = db_profiles[id];    if(dbp && dbp->ready_to_crawl()) {    WERR("Scheduler starts crawling "+id); +  dbp->action_reindex(); +  entry_queue = ([]); +  } +  } +  +  WERR(sizeof(crawl_queue)+" profiles in crawl queue."); +  foreach(indices(crawl_queue), int id) { +  if(crawl_queue[id]>t || !db_profiles[id]) continue; +  object dbp = db_profiles[id]; +  if(dbp && dbp->ready_to_crawl()) { +  WERR("Scheduler starts crawling "+id);    dbp->start_indexer();    }    }       WERR(sizeof(compact_queue)+" profiles in compact queue.");    foreach(indices(compact_queue), int id) {    if(compact_queue[id]>t || !db_profiles[id]) continue;    db_profiles[id]->start_compact();    }