pike.git / lib / modules / Sql.pmod / pgsql_util.pmod


pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:23:
 final Pike.Backend local_backend = Pike.SmallBackend();

 private Thread.Mutex backendmux = Thread.Mutex();
 private int clientsregistered;

 constant emptyarray = ({});
 final multiset censoroptions=(<"use_ssl","force_ssl",
  "cache_autoprepared_statements","reconnect","text_query","is_superuser",
  "server_encoding","server_version","integer_datetimes",
  "session_authorization">);
-final multiset cachealways=(<"BEGIN","begin","END","end","COMMIT","commit">);

 /* Statements matching createprefix cause the prepared statement cache
  * to be flushed to prevent stale references to (temporary) tables
  */
 final Regexp createprefix=iregexp("^\a*(CREATE|DROP)\a");

 /* Statements matching dontcacheprefix never enter the cache
  */
 private Regexp dontcacheprefix=iregexp("^\a*(FETCH|COPY)\a");

 /* Statements not matching paralleliseprefix will cause the driver
  * to stall submission until all previously started statements have
  * run to completion
  */
 private Regexp paralleliseprefix
  =iregexp("^\a*((SELEC|INSER)T|(UPDA|DELE)TE|(FETC|WIT)H)\a");

+/* Statements matching transbeginprefix will cause the driver to
+ * insert a sync after the statement.
+ * Failure to do so will result in portal synchronisation errors
+ * in the event of an ErrorResponse.
+ */
+final Regexp transbeginprefix
+ =iregexp("^\a*BEGIN[; \t\f\r\n]*$");
+
+/* Statements matching transendprefix will cause the driver to
+ * insert a sync after the statement.
+ * Failure to do so will result in portal synchronisation errors
+ * in the event of an ErrorResponse.
+ */
+final Regexp transendprefix
+ =iregexp("^\a*(COMMIT|ROLLBACK|END)[; \t\f\r\n]*$");
+
 /* For statements matching execfetchlimit the resultrows will not be
  * fetched in pieces
  */
 private Regexp execfetchlimit
  =iregexp("^\a*((UPDA|DELE)TE|INSERT)\a|\aLIMIT\a+[1-9][; \t\f\r\n]*$");

 private Regexp iregexp(string expr) {
   Stdio.Buffer ret=Stdio.Buffer();
   foreach(expr;;int c)
     if(c>='A'&&c<='Z')
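
Note that both new prefixes only recognise bare transaction-control statements.
A minimal sketch (not part of the commit) of how a query string would be
classified, assuming iregexp() expands "\a" into a whitespace class and makes
letters case-insensitive:

    // Illustrative helper only; the driver consults these regexps directly.
    int classifytrans(string q) {
      if (transbeginprefix->match(q))
        return 1;     // bare BEGIN, e.g. "BEGIN;" or "  begin  "
      if (transendprefix->match(q))
        return -1;    // bare COMMIT/ROLLBACK/END, e.g. "commit ;"
      return 0;       // anything else, e.g. "BEGIN WORK;" or "ROLLBACK TO SAVEPOINT s1"
    }
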
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:311:
     if (this) {			// Guard against async destructs
       towrite -= output_to(socket, towrite);
       lock = 0;
       if (!i->fillread && !sizeof(this))
         close();
     }
     return 0;
   }

   final void sendcmd(void|int mode, void|sql_result portal) {
-    if (portal)
-      queueup(portal);
     Thread.MutexKey lock;
+
+    int emptystash() {
+      int ret = 0;
       if (started) {
         lock = shortmux->lock();
         if (sizeof(stash)) {
           add(stash);
           stash->clear();
           foreach (stashqueue->try_read_array();; sql_result portal)
             queueup(portal);
+          ret = 1;
         }
         mode = mergemode(this, mode);
         stashflushmode = KEEP;
       }
-nosync:
+      return ret;
+    };
+
+    emptystash();
     do {
+      if (portal)
+        queueup(portal);
       switch (mode) {
         default:
-          break nosync;
+          continue;
         case SYNCSEND:
           PD("%d>Sync %d %d Queue\n",
            socket->query_fd(), synctransact, ++queueoutidx);
           add(PGSYNC);
           mode = SENDOUT;
           break;
         case FLUSHLOGSEND:
           PD("%d>%O %d Queue simplequery %d bytes\n",
            socket->query_fd(), portal._portalname, ++queueoutidx, sizeof(this));
           mode = FLUSHSEND;
       }
       qportals->write(synctransact++);
-    } while (0);
+    } while (!lock && emptystash());
     catch {
outer:
       do {
         switch(mode) {
           default:
             PD("%d>Skip flush %d Queue %O\n",
              socket->query_fd(), mode, (string)this);
             break outer;
           case FLUSHSEND:
             PD("Flush\n");
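
The nosync: label and its labelled break are replaced by a local emptystash()
that reports whether it moved anything out of the stash; the queueing pass is
then repeated while more work was stashed concurrently and the stash lock is
not already held. A self-contained sketch of that drain-until-stable pattern
(illustrative names, simplified locking, not the driver's API):

    class outqueue {
      private Thread.Mutex mux = Thread.Mutex();
      private array(string) stash = ({});
      private Stdio.Buffer out = Stdio.Buffer();

      final void stashcmd(string cmd) {		// called from other threads
        Thread.MutexKey lock = mux->lock();
        stash += ({ cmd });
      }

      private int emptystash() {		// returns 1 if anything was drained
        Thread.MutexKey lock = mux->lock();
        if (!sizeof(stash))
          return 0;
        out->add(stash * "");
        stash = ({});
        return 1;
      }

      final void sendcmd(string cmd) {
        stashcmd(cmd);
        while (emptystash())
          out->add("S");			// each drained batch ends with a Sync marker
        // ... write `out` to the socket here ...
      }
    }
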
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:536:
   private object pgsqlsess;
   private int eoffound;
   private conxion c;
   private conxiin cr;
   final mixed _delayederror;
   final int _state;
   final int _fetchlimit;
   private int alltext;
   final int _forcetext;
   private int syncparse;
+  private int transtype;

   final string _portalname;

   private int rowsreceived;
   private int inflight;
   private int portalbuffersize;
   private Thread.Mutex closemux;
   private Thread.Queue datarows;
   private array(mapping(string:mixed)) datarowdesc;
   private array(int) datarowtypes;	// types from datarowdesc
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:569:
   private string _sprintf(int type) {
     string res=UNDEFINED;
     switch(type) {
       case 'O':
         int fd=-1;
         if(c&&c->socket)
           catch(fd=c->socket->query_fd());
         res=sprintf("sql_result state: %d numrows: %d eof: %d inflight: %d\n"
                     "query: %O\n"
                     "fd: %O portalname: %O datarows: %d"
-                    " laststatus: %s\n",
+                    " synctransact: %d laststatus: %s\n",
                     _state,rowsreceived,eoffound,inflight,
                     _query,fd,_portalname,datarowtypes&&sizeof(datarowtypes),
+                    _synctransact,
                     statuscmdcomplete||(_unnamedstatementkey?"*parsing*":""));
         break;
     }
     return res;
   }

   protected void create(object _pgsqlsess,conxion _c,string query,
    int _portalbuffersize,int alltyped,array params,int forcetext,
-   int _timeout, int _syncparse) {
+   int _timeout, int _syncparse, int _transtype) {
     pgsqlsess = _pgsqlsess;
     cr = (c = _c)->i;
     _query = query;
     datarows = Thread.Queue();
     _ddescribe=Thread.Condition();
     _ddescribemux=Thread.Mutex();
     closemux=Thread.Mutex();
     portalbuffersize=_portalbuffersize;
     alltext = !alltyped;
     _params = params;
     _forcetext = forcetext;
     _state = PORTALINIT;
     timeout = _timeout;
     syncparse = _syncparse;
     gottimeout = _pgsqlsess->cancelquery;
     c->closecallbacks+=(<destroy>);
+    transtype = _transtype;
   }

   //! Returns the command-complete status for this query.
   //!
   //! @seealso
   //!   @[affected_rows()]
   //!
   //! @note
   //!   This function is PostgreSQL-specific, and thus it is not available
   //!   through the generic SQL-interface.
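
The extra trailing create() argument has to be supplied by the caller. A
hypothetical caller-side sketch (the corresponding pgsql.pike change is not
part of this hunk, and the local variable names are illustrative): the query
is classified once against the new prefixes and the result is passed through
unchanged.

    int transtype = NOTRANS;
    if (transbeginprefix->match(q))
      transtype = TRANSBEGIN;
    else if (transendprefix->match(q))
      transtype = TRANSEND;
    object portal = sql_result(this, c, q, portalbuffersize, alltyped,
                               paramValues, forcetext, timeout,
                               syncparse, transtype);
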
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:946:
       lock=0;
       PD("Bind portal %O statement %O\n",_portalname,_preparedname);
       _fetchlimit=pgsqlsess->_fetchlimit;
       _bindportal();
       conxsess bindbuffer = c->start();
       _unnamedstatementkey=0;
       CHAIN(bindbuffer)->add_int8('B')->add_hstring(plugbuffer, 4, 4);
       if(!_tprepared && sizeof(_preparedname))
         closestatement(CHAIN(bindbuffer), _preparedname);
       _sendexecute(_fetchlimit
-                   && !(cachealways[_query]
+                   && !(transtype != NOTRANS
                        || sizeof(_query)>=MINPREPARELENGTH &&
                        execfetchlimit->match(_query))
                   && _fetchlimit,bindbuffer);
     }
   } else
     lock=0;
 }

 final void _processrowdesc(array(mapping(string:mixed)) datarowdesc,
  array(int) datarowtypes) {
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:1063:
       _state=CLOSING;
       lock=0;
       PD("Close portal %O\n",_portalname);
       if (_portalname && sizeof(_portalname)) {
         plugbuffer->add_int8('C')->add_hstring(({'P',_portalname,0}),4,4);
         retval=FLUSHSEND;
       } else
         _unnamedportalkey=0;
       Thread.MutexKey lockc=pgsqlsess->_shortmux->lock();
       if(!--pgsqlsess->_portalsinflight) {
-        if(!pgsqlsess->_waittocommit && !plugbuffer->stashcount)
+        if(!pgsqlsess->_waittocommit && !plugbuffer->stashcount
+         && transtype != TRANSBEGIN)
           /*
            * stashcount will be non-zero if a parse request has been queued
            * before the close was initiated.
            * It's a bit of a tricky race, but this check should be sufficient.
            */
           pgsqlsess->_readyforquerycount++, retval=SYNCSEND;
         pgsqlsess->_pportalcount=0;
       }
       lockc=0;
     }
pike.git/lib/modules/Sql.pmod/pgsql_util.pmod:1139:
     };
   }

   final void _sendexecute(int fetchlimit,void|bufcon|conxsess plugbuffer) {
     int flushmode;
     PD("Execute portal %O fetchlimit %d\n",_portalname,fetchlimit);
     if(!plugbuffer)
       plugbuffer=c->start(1);
     CHAIN(plugbuffer)->add_int8('E')->add_hstring(({_portalname,0}), 4, 8)
      ->add_int32(fetchlimit);
-    if(!fetchlimit)
-      flushmode=_closeportal(plugbuffer)==SYNCSEND?SYNCSEND:FLUSHSEND;
-    else
+    if (!fetchlimit) {
+      if (transtype != NOTRANS)
+        pgsqlsess._intransaction = transtype == TRANSBEGIN;
+      flushmode = _closeportal(plugbuffer) == SYNCSEND
+       || transtype == TRANSEND ? SYNCSEND : FLUSHSEND;
+    } else
       inflight+=fetchlimit, flushmode=FLUSHSEND;
     plugbuffer->sendcmd(flushmode,this);
   }

   //! @returns
   //!   One result row at a time.
   //!
   //!   When using COPY FROM STDOUT, this method returns one row at a time
   //!   as a single string containing the entire row.
   //!
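
Two effects of the new branch: the connection now remembers whether it is
inside a transaction (pgsqlsess._intransaction is set on a bare BEGIN and
cleared on COMMIT/ROLLBACK/END), and a transaction-ending statement always
terminates its batch with a Sync, so an ErrorResponse leaves the backend at a
well-defined synchronisation point. A hedged restatement of the flushmode
decision (the constants are the module's own, the helper itself is
illustrative only):

    int pick_flushmode(int closemode, int transtype) {
      return closemode == SYNCSEND || transtype == TRANSEND
       ? SYNCSEND : FLUSHSEND;
    }
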