pike.git / src / interpret.c


pike.git/src/interpret.c:2067:
   }
   really_free_pike_frame(scope);
 }
 
 void *lower_mega_apply( INT32 args, struct object *o, ptrdiff_t fun )
 {
   struct pike_callsite C;
 
   callsite_init(&C, args);
   callsite_resolve_fun(&C, o, fun);
 
   if (C.type == CALLTYPE_PIKEFUN) {
-    FAST_CHECK_THREADS_ON_CALL();
+
     return C.ptr;
   }
 
   /* This is only needed for pike functions right now:
    * callsite_prepare(&C); */
   callsite_execute(&C);
   callsite_free(&C);
 
   return NULL;
 }
 
 /* Apply a function.

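For orientation, the hunk above shows the generic pike_callsite pattern that all of the apply variants touched by this commit share: the caller initializes a callsite, resolves the target, and then either hands the jump target back to the interpreter loop (Pike functions) or executes and frees the callsite in place (everything else). The sketch below condenses that pattern using only names visible in the hunks; it is an illustration, not a drop-in replacement for the patched functions. As the later hunks show, after this commit the FAST_CHECK_THREADS_ON_CALL() check runs when the Pike frame is set up (or reset for reuse) rather than in each caller.

  /* Illustrative sketch of the callsite pattern shared by the apply
   * variants in this commit (names taken from the hunks above; this is
   * not the actual Pike implementation). */
  static void *call_via_callsite(INT32 args, struct object *o, ptrdiff_t fun)
  {
    struct pike_callsite C;

    callsite_init(&C, args);          /* set up a callsite for `args` arguments */
    callsite_resolve_fun(&C, o, fun); /* determine the call type for o->fun */

    if (C.type == CALLTYPE_PIKEFUN) {
      /* Pike function: return the jump target to the interpreter loop.
       * The thread check now happens during frame setup, not here. */
      return C.ptr;
    }

    /* Non-Pike calls (efuns, C functions, ...) are executed in place. */
    callsite_execute(&C);
    callsite_free(&C);
    return NULL;
  }
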
pike.git/src/interpret.c:2117:
     callsite_resolve_svalue(&C, Pike_sp - args);
     break;
   case APPLY_SVALUE_STRICT:
     C.flags |= CALL_NEED_NO_RETVAL;
   case APPLY_SVALUE:
     callsite_resolve_svalue(&C, arg1);
     break;
   }
 
   if (C.type == CALLTYPE_PIKEFUN) {
-    FAST_CHECK_THREADS_ON_CALL();
+
     return C.ptr;
   }
 
   callsite_execute(&C);
   callsite_free(&C);
 
   return NULL;
 }
 
 /* TAILCALL optimization variants. They try to reuse the current frame */

pike.git/src/interpret.c:2161:
   }
 
   /* If the same frame is still set, either
    *  - this is a pike function and we can just return
    *    the jmp target
    *  - or this is some other calltype which does not
    *    use a frame at all, so we just set it to NULL
    */
   if (C.frame == frame) {
     if (C.type == CALLTYPE_PIKEFUN) {
-      FAST_CHECK_THREADS_ON_CALL();
+
       return C.ptr;
     } else C.frame = NULL;
   }
 
   callsite_prepare(&C);
   callsite_execute(&C);
   callsite_free(&C);
 
   return NULL;
 }

pike.git/src/interpret.c:2188:
   callsite_init(&C, args);
 
   if (!(frame->flags & PIKE_FRAME_NO_REUSE) && frame->refs == 1) {
     C.frame = Pike_fp;
   }
 
   callsite_resolve_fun(&C, o, fun);
 
   if (C.frame == frame) {
     if (C.type == CALLTYPE_PIKEFUN) {
-      FAST_CHECK_THREADS_ON_CALL();
+
       return C.ptr;
     } else C.frame = NULL;
   }
 
   callsite_prepare(&C);
   callsite_execute(&C);
   callsite_free(&C);
 
   return NULL;
 }

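Both TAILCALL variants gate frame reuse on the same condition: the current frame must not carry PIKE_FRAME_NO_REUSE and must hold exactly one reference, otherwise a fresh frame is used as in the plain apply path. Purely as an illustration, that condition could be expressed as the hypothetical predicate below; frame_is_reusable() does not exist in interpret.c, but the flag and refcount test are taken verbatim from the hunk above.

  /* Hypothetical helper, for illustration only: the reuse test used by
   * the TAILCALL variants above, factored into a predicate. */
  static inline int frame_is_reusable(const struct pike_frame *frame)
  {
    /* A frame may be reused for a tail call only if nothing forbids it
     * (PIKE_FRAME_NO_REUSE) and no one else holds a reference to it. */
    return !(frame->flags & PIKE_FRAME_NO_REUSE) && frame->refs == 1;
  }
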
pike.git/src/interpret.c:3388:
   } else {
     frame->pc = NULL;
   }
 
   frame->context = context;
   frame->fun = fun;
   frame->current_storage = o->storage + context->storage_offset;
   frame->args = args;
   frame->scope = NULL;
 
+  FAST_CHECK_THREADS_ON_CALL();
+
   check_stack(256);
   check_mark_stack(256);
 
   if (PIKE_NEEDS_TRACE())
     callsite_trace_call_fun(c);
 }
 
 PMOD_EXPORT void callsite_resolve_lfun(struct pike_callsite *c, struct object *o, int lfun) {
   struct program *p = o->prog;

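This is the first of the two places where the commit re-adds the check: at the end of Pike-frame setup, just before the stack checks, so every freshly initialized Pike frame triggers it regardless of which apply or TAILCALL variant started the call. FAST_CHECK_THREADS_ON_CALL() itself is defined elsewhere in the threads support code; broadly, it appears to be a cheap, throttled hook that only occasionally performs the expensive thread bookkeeping. The sketch below is a conceptual model of that idea with made-up names (call_check_counter, do_full_thread_check), not Pike's actual macro.

  /* Conceptual model of a counter-throttled per-call check; the names
   * below are invented for illustration and do not appear in Pike. */
  static unsigned int call_check_counter = 0;

  static void do_full_thread_check(void)
  {
    /* Placeholder for the expensive part, e.g. giving other threads a
     * chance to run. */
  }

  #define FAST_CALL_CHECK_SKETCH() do {           \
      if (++call_check_counter >= 1024) {         \
        call_check_counter = 0;                   \
        do_full_thread_check();                   \
      }                                           \
    } while (0)
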
pike.git/src/interpret.c:3470:
   {
     extern struct object *objects_to_destruct;
     if (UNLIKELY(objects_to_destruct))
       low_destruct_objects_to_destruct();
   }
 }
 
 PMOD_EXPORT void callsite_reset_pikecall(struct pike_callsite *c) {
   struct pike_frame *frame;
 
+  FAST_CHECK_THREADS_ON_CALL();
+
 #ifdef PIKE_DEBUG
   if (c->type != CALLTYPE_PIKEFUN)
     Pike_fatal("Calling callsite_reset_pikecall() on non pike frame.\n");
 #endif
 
   frame = c->frame;
 
 #ifdef PIKE_DEBUG
   if (frame != Pike_fp)
     Pike_fatal("Resetting frame which is not Pike_fp\n");

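The second addition covers the path that never builds a fresh frame: callsite_reset_pikecall(), which by its PIKE_DEBUG guards only accepts a CALLTYPE_PIKEFUN callsite whose frame is the current Pike_fp, appears to reset the current frame so it can be re-entered, i.e. the frame-reusing tail calls shown earlier. Placing the check here as well means a long chain of frame-reusing Pike-to-Pike calls still hits the throttle. The sketch below is a guess at how the pieces fit together; interpreter_tail_call() is a made-up name.

  /* Hypothetical flow, for illustration only: how a frame-reusing call
   * presumably picks up the thread check after this commit. */
  static void interpreter_tail_call(struct pike_callsite *c)
  {
    /* Resolution already decided this is a Pike function reusing the
     * current frame (see the TAILCALL hunks above). */
    if (c->type != CALLTYPE_PIKEFUN)
      return;

    /* Resetting the reused frame is now where
     * FAST_CHECK_THREADS_ON_CALL() runs for this path. */
    callsite_reset_pikecall(c);

    /* ...after which the interpreter continues at c->ptr as before. */
  }
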
pike.git/src/interpret.c:3533:
    * We pop all 'leftover' locals here, since that
    * was not done in the last call to callsite_return.
    */
   if (c->retval + c->args < Pike_sp)
     pike_pop_locals(c->retval, c->args);
 }
 
 static void callsite_return_slowpath(const struct pike_callsite *c);
 
 PMOD_EXPORT void callsite_execute(const struct pike_callsite *c) {
-  FAST_CHECK_THREADS_ON_CALL();
+
   switch (c->type) {
   default:
   case CALLTYPE_NONE:
 #ifdef PIKE_DEBUG
     Pike_fatal("Unknown callsite type: %d\n", c->type);
 #endif
     UNREACHABLE(break);
   case CALLTYPE_EFUN:
   case CALLTYPE_CFUN:
   {