pike.git / src / code / amd64.c

pike.git/src/code/amd64.c:1112:
  }

  static void amd64_push_int_reg(enum amd64_reg reg )
  {
    amd64_load_sp_reg();
    mov_imm_mem( PIKE_T_INT, sp_reg, OFFSETOF(svalue, tu.t.type));
    mov_reg_mem( reg, sp_reg, OFFSETOF(svalue, u.integer));
    amd64_add_sp( 1 );
  }

+ static void amd64_get_storage( enum amd64_reg reg, ptrdiff_t offset )
+ {
+   amd64_load_fp_reg();
+ #if 0
+   /* Note: We really should keep pike_frame->current_storage up to date instead.. */
+   mov_mem_reg( fp_reg, OFFSETOF(pike_frame,current_storage), P_REG_RAX );
+   add_reg_imm( reg, offset );
+ #else
+   /* fp->current_object->storage */
+   mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object), P_REG_RAX);
+   mov_mem_reg(P_REG_RAX, OFFSETOF(object,storage), reg );
+   mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), P_REG_RAX);
+   /* + fp->context->storage_offset */
+   add_reg_mem( reg, P_REG_RAX, OFFSETOF(inherit,storage_offset) );
+   /* + offset */
+   add_reg_imm( reg, offset);
+ #endif
+ }
+
+
  static void amd64_mark(int offset)
  {
    amd64_load_sp_reg();
    amd64_load_mark_sp_reg();
    if (offset) {
      add_reg_imm_reg(sp_reg, -offset * sizeof(struct svalue), P_REG_RAX);
      mov_reg_mem(P_REG_RAX, mark_sp_reg, 0);
    } else {
      mov_reg_mem(sp_reg, mark_sp_reg, 0);
    }
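For readers not fluent in the emitter macros: the enabled branch of amd64_get_storage() computes the address of a variable inside the current object's storage area. A minimal C sketch of the same address arithmetic, using only the struct fields referenced by the OFFSETOF() calls above (the helper name is made up for illustration and is not part of amd64.c):

    static char *private_storage_addr(struct pike_frame *fp, ptrdiff_t offset)
    {
      /* fp->current_object->storage is the start of the object's storage
         block, fp->context->storage_offset is this inherit's slice of it,
         and offset selects the variable itself. */
      return fp->current_object->storage
           + fp->context->storage_offset
           + offset;
    }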
pike.git/src/code/amd64.c:1217:
    /* if RAX > MIN_REF_TYPE+1 */
    cmp_reg32_imm(P_REG_RAX, MIN_REF_TYPE );
    jl( &label_A );
    /* Load pointer to refs -> RAX */
    mov_mem_reg( src, OFFSETOF(svalue, u.refs), P_REG_RAX);
    /* *RAX++ */
    add_mem32_imm( P_REG_RAX, OFFSETOF(pike_string,refs), 1);
   LABEL_A;
  }

+ void amd64_assign_svalue_no_free( enum amd64_reg dst, enum amd64_reg src, ptrdiff_t src_offset )
+ {
+   if( dst == P_REG_RAX || src == P_REG_RAX )
+     Pike_fatal( "Clobbering src/dst in amd64_assign_svalue_no_free <%d,%d>\n", dst, src);
+   /* Copy src -> dst */
+   mov_mem_reg(src, src_offset, P_REG_RAX);
+   mov_reg_mem(P_REG_RAX, dst, 0 );
+   mov_mem_reg(src, src_offset+sizeof(long), P_REG_RAX);
+   mov_reg_mem(P_REG_RAX, dst, sizeof(long) );
+ }
+
  void amd64_assign_local( int b )
  {
    amd64_load_fp_reg();
    amd64_load_sp_reg();
-
-   mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), ARG1_REG);
-   add_reg_imm( ARG1_REG, b*sizeof(struct svalue) );
-   mov_reg_reg( ARG1_REG, P_REG_RBX );
-
-   /* Free old svalue. */
-   amd64_free_svalue(ARG1_REG, 0);
-
-   /* Copy sp[-1] -> local */
-   amd64_load_sp_reg();
-   mov_mem_reg(sp_reg, -1*sizeof(struct svalue), P_REG_RAX);
-   mov_mem_reg(sp_reg, -1*sizeof(struct svalue)+sizeof(long), P_REG_RCX);
-
-   mov_reg_mem( P_REG_RAX, P_REG_RBX, 0 );
-   mov_reg_mem( P_REG_RCX, P_REG_RBX, sizeof(long) );
+   mov_mem_reg( fp_reg, OFFSETOF(pike_frame, locals), P_REG_RBX);
+   add_reg_imm( P_REG_RBX, b*sizeof(struct svalue) );
+   amd64_free_svalue(P_REG_RBX, 0);
+   amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, -1*sizeof(struct svalue));
  }

  static void amd64_pop_mark(void)
  {
    amd64_add_mark_sp( -1 );
  }

  static void amd64_push_string(int strno, int subtype)
  {
    amd64_load_fp_reg();
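The new helper copies an svalue as two machine words, the type word followed by the value union, without touching any reference count; amd64_assign_local() now reuses it instead of open-coding the copy. A stand-alone C model of that word-wise copy (illustrative only, assuming sizeof(struct svalue) == 2*sizeof(long), which is what the emitted code relies on):

    /* Not part of amd64.c: models the two mov pairs emitted by
       amd64_assign_svalue_no_free(). No reference is added here; the
       caller is responsible for that, as in F_ASSIGN_PRIVATE_GLOBAL below. */
    static void copy_svalue_words(long *dst, const long *src)
    {
      dst[0] = src[0];   /* type + subtype word */
      dst[1] = src[1];   /* u.* value word      */
    }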
pike.git/src/code/amd64.c:2569:
      add_reg_imm_reg(sp_reg, -sizeof(struct svalue), ARG1_REG);
      amd64_ref_svalue(ARG1_REG, 0);
      return;

    case F_ASSIGN_LOCAL_AND_POP:
      ins_debug_instr_prologue(a-F_OFFSET, b, 0);
      amd64_assign_local(b);
      amd64_add_sp(-1);
      return;

+   case F_ASSIGN_PRIVATE_GLOBAL_AND_POP:
+   case F_ASSIGN_PRIVATE_GLOBAL:
+     ins_debug_instr_prologue(a-F_OFFSET, b, 0);
+     amd64_get_storage( P_REG_RBX, b );
+     amd64_free_svalue( P_REG_RBX, 0 );
+     amd64_load_sp_reg();
+
+     if( a == F_ASSIGN_PRIVATE_GLOBAL_AND_POP )
+     {
+       amd64_add_sp(-1);
+       amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, 0);
+     }
+     else
+     {
+       amd64_assign_svalue_no_free( P_REG_RBX, sp_reg, -sizeof(struct svalue));
+       amd64_ref_svalue(P_REG_RBX, 1); /* already have type in RAX (from assign_svalue) */
+     }
+     return;
+
    case F_ASSIGN_GLOBAL:
    case F_ASSIGN_GLOBAL_AND_POP:
      /* arg1: pike_fp->current_object
         arg2: arg1 + identifier level
         arg3: Pike_sp-1
      */
      /* NOTE: We cannot simply do the same optimization as for
         ASSIGN_LOCAL_AND_POP with assign global, since assigning
         at times does not add references.
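In interpreter terms the two new cases behave roughly as the sketch below, where glob is the storage svalue located by amd64_get_storage() (illustrative pseudo-C only, not part of amd64.c; it uses the tu.t.type/u.refs fields and the MIN_REF_TYPE test seen earlier in this file):

    static void emulate_assign_private_global(struct svalue *glob, int pop)
    {
      free_svalue(glob);                       /* the old value is always freed   */
      if (pop) {                               /* _AND_POP: move the value, the   */
        Pike_sp--;                             /* stack slot's reference simply   */
        *glob = Pike_sp[0];                    /* changes owner                   */
      } else {
        *glob = Pike_sp[-1];                   /* plain assign: the value is also */
        if (glob->tu.t.type >= MIN_REF_TYPE)   /* left on the stack, so the new   */
          (*glob->u.refs)++;                   /* copy needs its own reference    */
      }
    }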
pike.git/src/code/amd64.c:2640:
      jmp( &label_C );
    LABEL_B;
      /* It's something else, svalue already in ARG1. */
      amd64_call_c_function( pike_sizeof );
    LABEL_C; /* all done, res in RAX */
      /* Store result on stack */
      amd64_push_int_reg( P_REG_RAX );
    }
    return;

+   case F_PRIVATE_GLOBAL:
+     /*
+       This is a private (or final) global which is not a 'short' one.
+
+       It is guaranteed to be available as an svalue at
+       pike_fp->current_object->storage + pike_fp->context->storage_offset + offset
+
+       (I really wish pike_fp->current_storage was kept up to date for pike
+       functions..)
+     */
+     ins_debug_instr_prologue(a-F_OFFSET, b, 0);
+     amd64_load_sp_reg();
+     amd64_get_storage( P_REG_RBX, b );
+     amd64_push_svaluep( P_REG_RBX );
+     return;
+
    case F_GLOBAL:
      ins_debug_instr_prologue(a-F_OFFSET, b, 0);
      amd64_load_fp_reg();
      amd64_load_sp_reg();
      mov_mem_reg(fp_reg, OFFSETOF(pike_frame, context), ARG3_REG);
      mov_mem_reg(fp_reg, OFFSETOF(pike_frame, current_object),
                  ARG2_REG);
      mov_reg_reg(sp_reg, ARG1_REG);
      mov_mem16_reg(ARG3_REG, OFFSETOF(inherit, identifier_level),
                    ARG3_REG);
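Reading such a global is then just a push of that storage svalue. Roughly equivalent C (a sketch only, not part of amd64.c; push_svalue() copies the svalue onto the Pike stack and adds a reference, which is what amd64_push_svaluep() emits inline):

    static void emulate_private_global(struct pike_frame *fp, ptrdiff_t offset)
    {
      struct svalue *glob = (struct svalue *)
        (fp->current_object->storage + fp->context->storage_offset + offset);
      push_svalue(glob);   /* copy to *Pike_sp++ and add a reference */
    }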