pike.git / src / threads.c

Context lines:

pike.git/src/threads.c:142: Inside #if defined(__NT__)
   unsigned *id)
{
  HANDLE h = (HANDLE)_beginthreadex(NULL, Pike_stack_size, fun, arg, 0, id);
  if(h)
  {
    CloseHandle(h);
    return 0;
  }
  else
  {
-   return 1;
+   return errno;
  }
}

#endif

#ifdef SIMULATE_COND_WITH_EVENT
PMOD_EXPORT int co_wait(COND_T *c, MUTEX_T *m)
{
  struct cond_t_queue me;
  event_init(&me.event);
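On NT, _beginthreadex() returns 0 on failure and sets errno, so returning
errno instead of a hard-coded 1 hands the caller a real error code. A
minimal standalone sketch of the same convention (worker() and
create_worker() are illustrative names, not from threads.c):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <windows.h>
    #include <process.h>    /* _beginthreadex() */

    static unsigned __stdcall worker(void *arg)
    {
      (void)arg;
      return 0;
    }

    /* Returns 0 on success, an errno-style code on failure, mirroring
     * the convention in the hunk above. */
    static int create_worker(unsigned *id)
    {
      HANDLE h = (HANDLE)_beginthreadex(NULL, 0, worker, NULL, 0, id);
      if (h) {
        CloseHandle(h);    /* we only keep the thread id */
        return 0;
      }
      return errno;        /* set by _beginthreadex() on failure */
    }

    int main(void)
    {
      unsigned id;
      int err = create_worker(&id);
      if (err)
        fprintf(stderr, "thread creation failed: %s\n", strerror(err));
      return 0;
    }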
pike.git/src/threads.c:1380: Inside #if defined(RDTSC) && defined(USE_CLOCK_FOR_SLICES)
   * unsynchronized tsc's between cores, OS tsc resets, etc -
   * individual intervals can be off by more than an order of
   * magnitude in either direction without affecting the final time
   * slice length appreciably.
   *
   * Note that the real interval lengths will always be longer.
   * One reason is that we won't get calls exactly when they run
   * out. Another is the often lousy clock(3) resolution. */

      if (prev_tsc) {
-       clock_t tsc_interval_time = clock_now - prev_clock;
-       if (tsc_interval_time > 0) {
+       if (clock_now > prev_clock) {
          /* Estimate the next interval just by extrapolating the
           * tsc/clock ratio of the last one. This adapts very
           * quickly but is also very "jumpy". That shouldn't matter
           * due to the approach with dividing the time slice into
           * ~20 tsc intervals.
           *
           * Note: The main source of the jumpiness is probably that
           * clock(3) has so lousy resolution on many platforms, i.e.
           * it may step forward very large intervals very seldom
           * (100 times/sec on linux/glibc 2.x). It also has the
           * effect that the actual tsc intervals will be closer to
           * 1/200 sec. */
-
+         clock_t tsc_interval_time = clock_now - prev_clock;
          INT64 new_target_int =
            (tsc_elapsed * (CLOCKS_PER_SEC / 400)) / tsc_interval_time;
          if (new_target_int < target_int << 2)
            target_int = new_target_int;
          else {
            /* The most likely cause for this is high variance in the
             * interval lengths due to low clock(3) resolution. */
#ifdef PROFILE_CHECK_THREADS
            fprintf (stderr, "[%d:%f] Capping large TSC interval increase "
                     "(from %"PRINTINT64"d to %"PRINTINT64"d)\n",
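Spelled out: a time slice is CLOCKS_PER_SEC/20 ticks of clock(3) time
(50 ms), and splitting it into ~20 tsc intervals gives a per-interval
budget of CLOCKS_PER_SEC/400 ticks. The next interval is extrapolated
from the last interval's tsc/clock ratio, and any increase is capped at
four-fold. A standalone sketch of that arithmetic (the names mirror the
hunk, but this is an illustration, not the threads.c code):

    #include <time.h>

    typedef long long INT64;

    static INT64 next_target_int(INT64 tsc_elapsed,
                                 clock_t tsc_interval_time,
                                 INT64 target_int)
    {
      /* tsc ticks for the next interval = measured tsc rate
       * (tsc_elapsed / tsc_interval_time) times the wanted interval
       * length in clock ticks (CLOCKS_PER_SEC / 400). */
      INT64 new_target_int =
        (tsc_elapsed * (CLOCKS_PER_SEC / 400)) / tsc_interval_time;

      /* Cap the growth at 4x; a huge jump usually just means clock(3)
       * stepped forward a long way at once due to its low resolution. */
      if (new_target_int < target_int << 2)
        return new_target_int;
      return target_int;
    }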
pike.git/src/threads.c:1429: Inside #if defined(RDTSC) && defined(USE_CLOCK_FOR_SLICES) and #if 0
          }
#endif
#endif
        }
        else {
          /* clock(3) can have pretty low resolution and might not
           * have advanced during the tsc interval. Just do another
           * round on the old estimate, keeping prev_tsc and
           * prev_clock fixed to get a longer interval for the next
           * measurement. */
-         if (tsc_interval_time < 0) {
+         if (clock_now < prev_clock) {
            /* clock() wraps around fairly often as well. We still
             * keep the old interval but update the baselines in this
             * case. */
            prev_tsc = tsc_now;
            prev_clock = clock_now;
          }
          target_int += tsc_elapsed;
#ifdef PROFILE_CHECK_THREADS
          no_clock_advs++;
#endif
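Beyond catching wraparound, the rewritten tests are also more portable:
clock_t may be an unsigned type, in which case clock_now - prev_clock is
never negative and the old test was dead code (and with a signed clock_t
the subtraction can overflow, which is undefined behaviour). Comparing
the operands directly works either way; a small demonstration, assuming
an unsigned 32-bit counter:

    #include <stdio.h>

    int main(void)
    {
      /* Assume the clock counter is unsigned 32-bit and has just
       * wrapped around. */
      unsigned int prev_clock = 0xfffffff0u;
      unsigned int clock_now  = 0x00000010u;

      /* Old style: unsigned subtraction wraps to a small positive
       * value, so "< 0" never fires (compilers typically warn that
       * this comparison is always false). */
      if (clock_now - prev_clock < 0)
        printf("old test: wrap detected\n");    /* never reached */
      else
        printf("old test: diff = %u, wrap missed\n",
               clock_now - prev_clock);

      /* New style: compare the operands directly. */
      if (clock_now < prev_clock)
        printf("new test: wrap detected\n");
      return 0;
    }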
pike.git/src/threads.c:1453: Inside #if defined(RDTSC) && defined(USE_CLOCK_FOR_SLICES) and #if defined(PROFILE_CHECK_THREADS)
#ifdef PROFILE_CHECK_THREADS
        fprintf (stderr, "[%d:%f] Warning: Encountered zero prev_tsc "
                 "(thread_start_clock: %"PRINTINT64"d, "
                 "clock_now: %"PRINTINT64"d)\n",
                 getpid(), get_real_time() * (1.0 / CPU_TIME_TICKS),
                 (INT64) thread_start_clock, (INT64) clock_now);
#endif
        prev_tsc = tsc_now;
      }

-     if (clock_now - thread_start_clock < 0)
+     if (clock_now < thread_start_clock)
        /* clock counter has wrapped since the start of the time
         * slice. Let's reset and yield. */
        thread_start_clock = 0;
      else if (clock_now - thread_start_clock <
               (clock_t) (CLOCKS_PER_SEC / 20))
        return;
    }

#ifdef PROFILE_CHECK_THREADS
    {
pike.git/src/threads.c:1545:
      timersub(&now, &last_check, &diff);
      if (diff.tv_usec < 50000 && diff.tv_sec == 0)
        return;
      last_check = now;
#endif
    }
  }
#elif defined (USE_CLOCK_FOR_SLICES)
  {
    clock_t clock_now = clock();
-   if (clock_now - thread_start_clock < 0)
+   if (clock_now < thread_start_clock)
      /* clock counter has wrapped since the start of the time slice.
       * Let's reset and yield. */
      thread_start_clock = 0;
    else if (clock_now - thread_start_clock < (clock_t) (CLOCKS_PER_SEC / 20))
      return;
  }
#else
  static int div_;
  if(div_++ & 255)
    return;
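For reference, the gettimeofday(2) branch above rate-limits the check to
once per 50 ms. A minimal standalone version of that throttle pattern
(maybe_check() is an illustrative name; timersub() is the BSD/glibc
macro used in the hunk):

    #include <stdio.h>
    #include <sys/time.h>

    /* Returns nonzero at most once per 50 ms, mirroring the timersub()
     * throttle above. */
    static int maybe_check(void)
    {
      static struct timeval last_check;
      struct timeval now, diff;

      gettimeofday(&now, NULL);
      timersub(&now, &last_check, &diff);
      if (diff.tv_sec == 0 && diff.tv_usec < 50000)
        return 0;              /* too soon; skip the expensive work */
      last_check = now;
      return 1;
    }

    int main(void)
    {
      int hits = 0, i;
      for (i = 0; i < 10000000; i++)
        hits += maybe_check();
      printf("expensive check ran %d times\n", hits);
      return 0;
    }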
pike.git/src/threads.c:1748:       DEBUG_CHECK_THREAD();       THREADS_FPRINTF(0, (stderr,"new_thread_func(): Thread %p inited\n",    arg.thread_state));       if(SETJMP(back))    {    if(throw_severity <= THROW_ERROR)    call_handle_error(); -  if(throw_severity == THROW_EXIT) +  else if(throw_severity == THROW_EXIT)    {    /* This is too early to get a clean exit if DO_PIKE_CLEANUP is    * active. Otoh it cannot be done later since it requires the    * evaluator stacks in the gc calls. It's difficult to solve    * without handing over the cleanup duty to the main thread. */    pike_do_exit(throw_value.u.integer); -  +  } else if (thread_state->thread_obj) { +  /* Copy the thrown exit value to the thread_state here, +  * if the thread hasn't been destructed. */ +  assign_svalue(&thread_state->result, &throw_value);    }    } else {    INT32 args=arg.args->size;    back.severity=THROW_EXIT;    push_array_items(arg.args);    arg.args=0;    f_call_function(args);       /* Copy return value to the thread_state here, if the thread    * object hasn't been destructed. */