pike.git / src / threads.c

version » Context lines:

pike.git/src/threads.c:346: Inside #if undefined(CONFIGURE_TEST) and #if defined(HAVE_CLOCK) && \
  #if defined(HAVE_CLOCK) && \    (defined (RDTSC) || \    (!defined(HAVE_GETHRTIME) && \    !(defined(HAVE_MACH_TASK_INFO_H) && defined(TASK_THREAD_TIMES_INFO))))   static clock_t thread_start_clock = 0;   static THREAD_T last_clocked_thread = 0;   #define USE_CLOCK_FOR_SLICES   #ifdef RDTSC   static int use_tsc_for_slices;   #define TSC_START_INTERVAL 100000 - static INT64 prev_tsc; + static INT64 prev_tsc; /* TSC and */   static clock_t prev_clock; /* clock() at the beg of the last tsc interval */   #endif   #endif      #define THIS_THREAD ((struct thread_state *)CURRENT_STORAGE)      static struct callback *threads_evaluator_callback=0;      PMOD_EXPORT int num_threads = 1;   PMOD_EXPORT int threads_disabled = 0;
pike.git/src/threads.c:1364: Inside #if defined(RDTSC) && defined(USE_CLOCK_FOR_SLICES)
      /* Aim tsc intervals at 1/20th of a thread time slice interval,    * i.e. at 1/400 sec. That means the time is checked with    * clock(3) approx 20 times per slice. That still cuts the vast    * majority of the clock() calls and we're still fairly safe    * against tsc inconsistencies of different sorts, like cpu clock    * rate changes (on older cpu's with variable tsc),    * unsynchronized tsc's between cores, OS tsc resets, etc -    * individual intervals can be off by more than an order of    * magnitude in either direction without affecting the final time -  * slice length appreciably. */ +  * slice length appreciably. +  * +  * Note that the real interval lengths will always be longer. +  * One reason is that we won't get calls exactly when they run +  * out. Another is the often lousy clock(3) resolution. */       if (prev_tsc) {    clock_t tsc_interval_time = clock_now - prev_clock;    if (tsc_interval_time > 0) {    /* Estimate the next interval just by extrapolating the    * tsc/clock ratio of the last one. This adapts very    * quickly but is also very "jumpy". That shouldn't matter    * due to the approach with dividing the time slice into -  * ~20 tsc intervals. */ +  * ~20 tsc intervals. +  * +  * Note: The main source of the jumpiness is probably that +  * clock(3) has so lousy resolution on many platforms, i.e. +  * it may step forward very large intervals very seldom +  * (100 times/sec on linux/glibc 2.x). It also has the +  * effect that the actual tsc intervals will be closer to +  * 1/200 sec. */    INT64 new_target_int =    (tsc_elapsed * (CLOCKS_PER_SEC / 400)) / tsc_interval_time;    if (new_target_int < target_int << 1)    target_int = new_target_int;    else { -  +  /* The most likely cause for this is high variance in the +  * interval lengths due to low clock(3) resolution. 
*/   #ifdef PROFILE_CHECK_THREADS    fprintf (stderr, "TSC suspect forward jump detected "    "(prev int: %"PRINTINT64"d, "    "calculated int: %"PRINTINT64"d) - "    "capping\n", target_int, new_target_int);   #endif    /* The + 1 is paranoia just in case it has become zero somehow. */    target_int = (target_int << 1) + 1;    }    prev_tsc = tsc_now;